Created
March 10, 2026 19:06
-
-
Save aurotripathy/efafdad3995395947e72b628dba3852b to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Export a pretrained YOLOv8 model to ONNX and run a single inference with ONNX Runtime."""
import os

import numpy as np
import onnxruntime
from ultralytics import YOLO

ONNX_PATH = "yolov8n.onnx"

# Export only when the ONNX file is missing — exporting is slow and the
# result is deterministic for a fixed weights file, so re-running it on
# every invocation is wasted work.
if not os.path.exists(ONNX_PATH):
    model = YOLO("yolov8n.pt")  # downloads pretrained weights on first use
    model.export(format="onnx")  # writes yolov8n.onnx next to the weights

# 1. Load the ONNX model into an inference session.
#    ONNX Runtime tries the providers in order, so this uses CUDA when
#    available and falls back to CPU otherwise.
session = onnxruntime.InferenceSession(
    ONNX_PATH,
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)

# 2. Build a dummy input matching the model's *declared* input shape,
#    instead of hardcoding it (the exported YOLOv8n expects NCHW float32,
#    by default 1x3x640x640).
model_input = session.get_inputs()[0]
# Dynamic dimensions show up as strings or None in the declared shape;
# substitute YOLOv8's defaults (batch 1, 3 channels, 640x640 image).
_defaults = [1, 3, 640, 640]
shape = [
    dim if isinstance(dim, int) and dim > 0 else _defaults[i]
    for i, dim in enumerate(model_input.shape)
]
input_data = np.random.rand(*shape).astype(np.float32)
inputs = {model_input.name: input_data}

# 3. Run the model. Passing None as the first argument requests all outputs.
outputs = session.run(None, inputs)

# 4. Post-process: session.run returns a list of arrays; the first entry is
#    the raw detection tensor (still needs NMS/decoding for real use).
model_output = outputs[0]
print(model_output)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.