Last active
March 13, 2026 18:55
-
-
Save aurotripathy/e06262f466b9f98da6f453f7a5b6c5fb to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Smoke-test ONNX Runtime GPU inference on three exported models.

Prints the ONNX Runtime version and the CUDA/cuDNN versions reported by
PyTorch, then pushes random input tensors through each model using the
CUDAExecutionProvider and prints the resulting output shapes.
"""
import onnxruntime as ort  # fixed: original read "mport" (dropped leading 'i')
import numpy as np
import torch

print(f"onnx runtime version:{ort.__version__}")

# Check if PyTorch can see an available GPU
if torch.cuda.is_available():
    print(f"CUDA is available: {torch.cuda.is_available()}")
    # Print the CUDA version that PyTorch was compiled against
    print(f"PyTorch built with CUDA version: {torch.version.cuda}")
    # Optional: Print the cuDNN version (if enabled)
    print(f"cuDNN version: {torch.backends.cudnn.version()}")
else:
    print("PyTorch was likely installed without CUDA support or an NVIDIA driver issue exists.")
    print(f"PyTorch built with CUDA version (might be None): {torch.version.cuda}")

# Detector model: cast the random input array to float32.
x = np.random.rand(1, 3, 512, 512).astype(np.float32)
sess = ort.InferenceSession("onnx-files/detr_1_3_512_512.onnx", providers=['CUDAExecutionProvider'])
input_name = sess.get_inputs()[0].name
ort_inputs = {input_name: x}
outputs = sess.run(None, ort_inputs)
print(f"Detector shape: {outputs[0].shape}, {outputs[1].shape}")

# load next model (point-pillar model takes a float16 input)
x = np.random.rand(10000, 32, 10).astype(np.float16)
try:
    sess = ort.InferenceSession("onnx-files/pointpillar_custom.onnx", providers=['CUDAExecutionProvider'])
    input_name = sess.get_inputs()[0].name
    print(f"input name: {input_name}")
    ort_inputs = {input_name: x}
    outputs = sess.run(None, ort_inputs)
    # sess.run returns a *list* of output arrays, so index before taking
    # .shape (the original read outputs.shape, which raises AttributeError
    # and was silently caught by the except below).
    print(f"Output shape: {outputs[0].shape}")
except Exception as e:
    print(f"{type(e)}: {e}")

# load next model (classifier, float32 input)
x = np.random.rand(1, 3, 416, 640).astype(np.float32)
try:
    sess = ort.InferenceSession("onnx-files/resnet34_1_3_416_640.onnx", providers=['CUDAExecutionProvider'])
    input_name = sess.get_inputs()[0].name
    print(f"input name: {input_name}")
    ort_inputs = {input_name: x}
    outputs = sess.run(None, ort_inputs)
    print(f"Classifier shape: {outputs[0].shape}")
except Exception as e:
    print("Unexpected type")
    print(f"{type(e)}: {e}")
Author
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
# Requirements: onnxruntime-gpu built for CUDA 12 and cuDNN 9