added inference vstream loading

Nickiel12 2024-10-16 00:52:17 +00:00
parent 93fc8addc4
commit c3112f90b0


@@ -13,10 +13,10 @@ const max_edge_layers = 32;
pub fn main() void {
-    // var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
-    // defer arena.deinit();
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    defer arena.deinit();
-    // const alloc = arena.allocator();
+    const alloc = arena.allocator();

    std.fs.cwd().access(hef_file, .{ }) catch |e| {
        std.debug.panic("Could not open hef file! '{any}'", .{ e });
@@ -85,6 +85,34 @@ pub fn main() void {
std.debug.print("Output_vstream_size is: '{d}'\n", .{ output_vstream_size });
std.debug.print("Input_vstream_size is: '{d}'\n", .{ input_vstream_size });
+    // Query how many bytes a single input frame expects.
+    var input_frame_size: usize = 0;
+    status = hlo.hailo_get_input_vstream_frame_size(input_vstreams, &input_frame_size);
+    assert(status == hlo.HAILO_SUCCESS);
+
+    const frame_count = input_frame_size / @sizeOf(u8);
+    std.debug.print("Trying to create {d} u8s\n", .{ frame_count });
+
+    // Allocate one frame's worth of bytes and fill it with pseudo-random data.
+    const input_data = alloc.alloc(u8, frame_count) catch |e| {
+        std.debug.panic("Could not allocate input buffer! '{any}'", .{ e });
+    };
+    var rnd = std.Random.DefaultPrng.init(0);
+    for (0..frame_count) |i| {
+        input_data[i] = rnd.random().int(u8);
+    }
+
+    // Here we simulate taking our data, chunking it into frame_size pieces, then uploading
+    // each frame until we have uploaded all of our data.
+    for (0..frame_count) |_| {
+        status = hlo.hailo_vstream_write_raw_buffer(input_vstreams, input_data.ptr, input_frame_size);
+        assert(status == hlo.HAILO_SUCCESS);
+    }
+
+    status = hlo.hailo_flush_input_vstream(input_vstreams);
+    assert(status == hlo.HAILO_SUCCESS);
    _ = hlo.hailo_release_output_vstreams(&output_vstreams, output_vstream_size);
    _ = hlo.hailo_release_input_vstreams(&input_vstreams, input_vstream_size);
    _ = hlo.hailo_release_hef(hef);
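
Note (not part of this commit): the change only pushes frames into the input vstream; a natural follow-up would be draining the matching output vstream after the flush. A minimal sketch, assuming `output_vstreams` is a valid output vstream handle (passed the same way the code above passes `input_vstreams`) and using the HailoRT C API's `hailo_get_output_vstream_frame_size` and `hailo_vstream_read_raw_buffer`:

    // Sketch only: read back one output frame per input frame written above.
    var output_frame_size: usize = 0;
    status = hlo.hailo_get_output_vstream_frame_size(output_vstreams, &output_frame_size);
    assert(status == hlo.HAILO_SUCCESS);

    const output_data = alloc.alloc(u8, output_frame_size) catch |e| {
        std.debug.panic("Could not allocate output buffer! '{any}'", .{ e });
    };
    for (0..frame_count) |_| {
        status = hlo.hailo_vstream_read_raw_buffer(output_vstreams, output_data.ptr, output_frame_size);
        assert(status == hlo.HAILO_SUCCESS);
    }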