From e6a4ca56a3fbc604f615bc2c8293b76c1a4b1cab Mon Sep 17 00:00:00 2001 From: Anstarc Date: Tue, 5 May 2026 14:38:56 +0800 Subject: [PATCH 1/4] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=20ONNX=20Runtime?= =?UTF-8?q?=201.24.4=20GPU=20=E7=89=88=E6=9C=AC=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frameworks/onnxruntime/1.24.4-gpu/Dockerfile | 32 +++ frameworks/onnxruntime/1.24.4-gpu/README.md | 85 +++++++ frameworks/onnxruntime/1.24.4-gpu/build.conf | 4 + frameworks/onnxruntime/1.24.4-gpu/test.sh | 243 +++++++++++++++++++ 4 files changed, 364 insertions(+) create mode 100644 frameworks/onnxruntime/1.24.4-gpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.4-gpu/README.md create mode 100644 frameworks/onnxruntime/1.24.4-gpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.4-gpu/test.sh diff --git a/frameworks/onnxruntime/1.24.4-gpu/Dockerfile b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile new file mode 100644 index 0000000..79dde7f --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile @@ -0,0 +1,32 @@ +FROM opencloudos/opencloudos9-cuda-devel:12.8 + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.4 GPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime GPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.4 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime-gpu==1.24.4 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 设置 GPU 环境变量 +ENV NVIDIA_VISIBLE_DEVICES=all +ENV CUDA_MODULE_LOADING=LAZY + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.4-gpu/README.md 
b/frameworks/onnxruntime/1.24.4-gpu/README.md new file mode 100644 index 0000000..fa03bed --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-gpu/README.md @@ -0,0 +1,85 @@ +# ONNX Runtime 1.24.4 GPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-cuda-devel:12.8 +- **Python 版本**: 3.11 +- **CUDA 版本**: 12.8 +- **ONNX Runtime 版本**: 1.24.4 (GPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-gpu:1.24.4 . +``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-gpu:1.24.4 +``` + +测试项包括: +- Python 和 CUDA 环境 +- ONNX Runtime 版本验证 +- GPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-gpu:1.24.4 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.4 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### GPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CUDA Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.4 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子,支持 GPU) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子,CPU only) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **GPU**: NVIDIA GPU with CUDA 12.x support +- **显存**: 建议 4GB+ +- **Docker**: 19.03+ with nvidia-docker2 + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CUDA Execution 
Provider](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.4-gpu/build.conf b/frameworks/onnxruntime/1.24.4-gpu/build.conf new file mode 100644 index 0000000..13e9c2f --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-gpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.4 GPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-gpu +IMAGE_TAG=1.24.4 +GPU_TEST=true diff --git a/frameworks/onnxruntime/1.24.4-gpu/test.sh b/frameworks/onnxruntime/1.24.4-gpu/test.sh new file mode 100755 index 0000000..78e90cd --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-gpu/test.sh @@ -0,0 +1,243 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-gpu:1.24.4}" + +echo "==========================================" +echo "测试 ONNX Runtime 1.24.4 GPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 和 CUDA 环境 +echo -n "测试 1: Python 和 CUDA 环境... " +docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.4" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.4, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... 
" +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... " + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" -- Gitee From bc4962860e6e72b1f1592a7c6856bfacb59444ea Mon Sep 17 00:00:00 2001 From: Anstarc Date: Tue, 5 May 2026 15:47:08 +0800 Subject: [PATCH 2/4] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=20ONNX=20Runtime?= =?UTF-8?q?=201.24.1-1.24.4=20CPU=20=E5=92=8C=20GPU=20=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frameworks/onnxruntime/1.24.1-cpu/Dockerfile | 28 +++ frameworks/onnxruntime/1.24.1-cpu/README.md | 82 +++++++ frameworks/onnxruntime/1.24.1-cpu/build.conf | 4 + frameworks/onnxruntime/1.24.1-cpu/test.sh | 217 +++++++++++++++++ frameworks/onnxruntime/1.24.1-gpu/Dockerfile | 32 +++ frameworks/onnxruntime/1.24.1-gpu/README.md | 85 +++++++ frameworks/onnxruntime/1.24.1-gpu/build.conf | 4 + frameworks/onnxruntime/1.24.1-gpu/test.sh | 243 +++++++++++++++++++ frameworks/onnxruntime/1.24.2-cpu/Dockerfile | 28 +++ frameworks/onnxruntime/1.24.2-cpu/README.md | 82 +++++++ frameworks/onnxruntime/1.24.2-cpu/build.conf | 4 + frameworks/onnxruntime/1.24.2-cpu/test.sh | 217 +++++++++++++++++ frameworks/onnxruntime/1.24.2-gpu/Dockerfile | 32 +++ frameworks/onnxruntime/1.24.2-gpu/README.md | 85 +++++++ frameworks/onnxruntime/1.24.2-gpu/build.conf | 4 + frameworks/onnxruntime/1.24.2-gpu/test.sh | 243 +++++++++++++++++++ frameworks/onnxruntime/1.24.3-cpu/Dockerfile | 28 +++ frameworks/onnxruntime/1.24.3-cpu/README.md | 82 +++++++ frameworks/onnxruntime/1.24.3-cpu/build.conf | 4 + frameworks/onnxruntime/1.24.3-cpu/test.sh | 217 +++++++++++++++++ frameworks/onnxruntime/1.24.3-gpu/Dockerfile | 32 +++ frameworks/onnxruntime/1.24.3-gpu/README.md | 85 +++++++ frameworks/onnxruntime/1.24.3-gpu/build.conf | 4 + frameworks/onnxruntime/1.24.3-gpu/test.sh | 243 +++++++++++++++++++ frameworks/onnxruntime/1.24.4-cpu/Dockerfile | 28 +++ frameworks/onnxruntime/1.24.4-cpu/README.md | 82 +++++++ frameworks/onnxruntime/1.24.4-cpu/build.conf | 4 + 
frameworks/onnxruntime/1.24.4-cpu/test.sh | 217 +++++++++++++++++ 28 files changed, 2416 insertions(+) create mode 100644 frameworks/onnxruntime/1.24.1-cpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.1-cpu/README.md create mode 100644 frameworks/onnxruntime/1.24.1-cpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.1-cpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.1-gpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.1-gpu/README.md create mode 100644 frameworks/onnxruntime/1.24.1-gpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.1-gpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.2-cpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.2-cpu/README.md create mode 100644 frameworks/onnxruntime/1.24.2-cpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.2-cpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.2-gpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.2-gpu/README.md create mode 100644 frameworks/onnxruntime/1.24.2-gpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.2-gpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.3-cpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.3-cpu/README.md create mode 100644 frameworks/onnxruntime/1.24.3-cpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.3-cpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.3-gpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.3-gpu/README.md create mode 100644 frameworks/onnxruntime/1.24.3-gpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.3-gpu/test.sh create mode 100644 frameworks/onnxruntime/1.24.4-cpu/Dockerfile create mode 100644 frameworks/onnxruntime/1.24.4-cpu/README.md create mode 100644 frameworks/onnxruntime/1.24.4-cpu/build.conf create mode 100755 frameworks/onnxruntime/1.24.4-cpu/test.sh diff --git a/frameworks/onnxruntime/1.24.1-cpu/Dockerfile b/frameworks/onnxruntime/1.24.1-cpu/Dockerfile new file mode 100644 index 
0000000..26ba70c --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.1 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.1 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.1 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.1-cpu/README.md b/frameworks/onnxruntime/1.24.1-cpu/README.md new file mode 100644 index 0000000..d877a1d --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.1 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.1 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.1 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.1 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.1 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.1 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.1 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.1-cpu/build.conf b/frameworks/onnxruntime/1.24.1-cpu/build.conf new file mode 100644 index 0000000..b6b5028 --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.1 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.1 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.1-cpu/test.sh b/frameworks/onnxruntime/1.24.1-cpu/test.sh new file mode 100755 index 0000000..16a261c --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-cpu/test.sh @@ -0,0 +1,217 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-cpu:1.24.1}" + +echo "==========================================" +echo "测试 ONNX 
Runtime 1.24.1 CPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 环境 +echo -n "测试 1: Python 环境... " +docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 --version + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.1" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.1, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: CPU 推理测试 +echo -n "测试 5: CPU 推理测试... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 创建简单的 ONNX 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +# 使用 CPU Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 +fi + +# 测试 6: ONNX ML 域支持 +echo -n "测试 6: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.1-gpu/Dockerfile b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile new file mode 100644 index 0000000..e7a6cdf --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile @@ -0,0 +1,32 @@ +FROM opencloudos/opencloudos9-cuda-devel:12.8 + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.1 GPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime GPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.1 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime-gpu==1.24.1 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 设置 GPU 环境变量 +ENV NVIDIA_VISIBLE_DEVICES=all +ENV CUDA_MODULE_LOADING=LAZY + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.1-gpu/README.md b/frameworks/onnxruntime/1.24.1-gpu/README.md new file mode 100644 index 0000000..40b3aad --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-gpu/README.md @@ -0,0 +1,85 @@ +# ONNX Runtime 1.24.1 GPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-cuda-devel:12.8 +- **Python 版本**: 3.11 +- **CUDA 版本**: 12.8 +- **ONNX Runtime 版本**: 1.24.1 (GPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-gpu:1.24.1 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-gpu:1.24.1 +``` + +测试项包括: +- Python 和 CUDA 环境 +- ONNX Runtime 版本验证 +- GPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-gpu:1.24.1 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.1 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### GPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CUDA Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.1 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子,支持 GPU) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子,CPU only) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **GPU**: NVIDIA GPU with CUDA 12.x support +- **显存**: 建议 4GB+ +- **Docker**: 19.03+ with nvidia-docker2 + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CUDA Execution Provider](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.1-gpu/build.conf b/frameworks/onnxruntime/1.24.1-gpu/build.conf new file mode 100644 index 0000000..0a257d6 --- /dev/null +++ b/frameworks/onnxruntime/1.24.1-gpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.1 GPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-gpu +IMAGE_TAG=1.24.1 +GPU_TEST=true diff --git a/frameworks/onnxruntime/1.24.1-gpu/test.sh b/frameworks/onnxruntime/1.24.1-gpu/test.sh new file mode 100755 index 0000000..fb409a9 --- /dev/null +++ 
b/frameworks/onnxruntime/1.24.1-gpu/test.sh @@ -0,0 +1,243 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-gpu:1.24.1}" + +echo "==========================================" +echo "测试 ONNX Runtime 1.24.1 GPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 和 CUDA 环境 +echo -n "测试 1: Python 和 CUDA 环境... " +docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.1" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.1, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... 
" + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.2-cpu/Dockerfile b/frameworks/onnxruntime/1.24.2-cpu/Dockerfile new file mode 100644 index 0000000..9dbf76b --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.2 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.2 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.2 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.2-cpu/README.md b/frameworks/onnxruntime/1.24.2-cpu/README.md new file mode 100644 index 0000000..4c486c0 --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.2 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.2 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.2 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.2 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.2 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.2 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.2 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.2-cpu/build.conf b/frameworks/onnxruntime/1.24.2-cpu/build.conf new file mode 100644 index 0000000..c03bb1c --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.2 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.2 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.2-cpu/test.sh b/frameworks/onnxruntime/1.24.2-cpu/test.sh new file mode 100755 index 0000000..747920b --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/test.sh @@ -0,0 +1,217 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-cpu:1.24.2}" + +echo "==========================================" +echo "测试 ONNX 
Runtime 1.24.2 CPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 环境 +echo -n "测试 1: Python 环境... " +docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 --version + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.2" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.2, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: CPU 推理测试 +echo -n "测试 5: CPU 推理测试... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 创建简单的 ONNX 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +# 使用 CPU Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 +fi + +# 测试 6: ONNX ML 域支持 +echo -n "测试 6: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.2-gpu/Dockerfile b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile new file mode 100644 index 0000000..e404c1e --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile @@ -0,0 +1,32 @@ +FROM opencloudos/opencloudos9-cuda-devel:12.8 + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.2 GPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime GPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.2 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime-gpu==1.24.2 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 设置 GPU 环境变量 +ENV NVIDIA_VISIBLE_DEVICES=all +ENV CUDA_MODULE_LOADING=LAZY + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.2-gpu/README.md b/frameworks/onnxruntime/1.24.2-gpu/README.md new file mode 100644 index 0000000..1abbddd --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-gpu/README.md @@ -0,0 +1,85 @@ +# ONNX Runtime 1.24.2 GPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-cuda-devel:12.8 +- **Python 版本**: 3.11 +- **CUDA 版本**: 12.8 +- **ONNX Runtime 版本**: 1.24.2 (GPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-gpu:1.24.2 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-gpu:1.24.2 +``` + +测试项包括: +- Python 和 CUDA 环境 +- ONNX Runtime 版本验证 +- GPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-gpu:1.24.2 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.2 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### GPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CUDA Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.2 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子,支持 GPU) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子,CPU only) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **GPU**: NVIDIA GPU with CUDA 12.x support +- **显存**: 建议 4GB+ +- **Docker**: 19.03+ with nvidia-docker2 + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CUDA Execution Provider](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.2-gpu/build.conf b/frameworks/onnxruntime/1.24.2-gpu/build.conf new file mode 100644 index 0000000..44e96cb --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-gpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.2 GPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-gpu +IMAGE_TAG=1.24.2 +GPU_TEST=true diff --git a/frameworks/onnxruntime/1.24.2-gpu/test.sh b/frameworks/onnxruntime/1.24.2-gpu/test.sh new file mode 100755 index 0000000..4613042 --- /dev/null +++ 
b/frameworks/onnxruntime/1.24.2-gpu/test.sh @@ -0,0 +1,243 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-gpu:1.24.2}" + +echo "==========================================" +echo "测试 ONNX Runtime 1.24.2 GPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 和 CUDA 环境 +echo -n "测试 1: Python 和 CUDA 环境... " +docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.2" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.2, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... 
" + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.3-cpu/Dockerfile b/frameworks/onnxruntime/1.24.3-cpu/Dockerfile new file mode 100644 index 0000000..4a0c7db --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.3 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.3 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.3 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.3-cpu/README.md b/frameworks/onnxruntime/1.24.3-cpu/README.md new file mode 100644 index 0000000..e3846a2 --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.3 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.3 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.3 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.3 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.3 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.3 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.3 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.3-cpu/build.conf b/frameworks/onnxruntime/1.24.3-cpu/build.conf new file mode 100644 index 0000000..511886b --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.3 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.3 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.3-cpu/test.sh b/frameworks/onnxruntime/1.24.3-cpu/test.sh new file mode 100755 index 0000000..f3a072b --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/test.sh @@ -0,0 +1,217 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-cpu:1.24.3}" + +echo "==========================================" +echo "测试 ONNX 
Runtime 1.24.3 CPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 环境 +echo -n "测试 1: Python 环境... " +docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 --version + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.3" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.3, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: CPU 推理测试 +echo -n "测试 5: CPU 推理测试... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 创建简单的 ONNX 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +# 使用 CPU Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 +fi + +# 测试 6: ONNX ML 域支持 +echo -n "测试 6: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.3-gpu/Dockerfile b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile new file mode 100644 index 0000000..cb370e4 --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile @@ -0,0 +1,32 @@ +FROM opencloudos/opencloudos9-cuda-devel:12.8 + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.3 GPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime GPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.3 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime-gpu==1.24.3 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 设置 GPU 环境变量 +ENV NVIDIA_VISIBLE_DEVICES=all +ENV CUDA_MODULE_LOADING=LAZY + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.3-gpu/README.md b/frameworks/onnxruntime/1.24.3-gpu/README.md new file mode 100644 index 0000000..13a058a --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-gpu/README.md @@ -0,0 +1,85 @@ +# ONNX Runtime 1.24.3 GPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-cuda-devel:12.8 +- **Python 版本**: 3.11 +- **CUDA 版本**: 12.8 +- **ONNX Runtime 版本**: 1.24.3 (GPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-gpu:1.24.3 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-gpu:1.24.3 +``` + +测试项包括: +- Python 和 CUDA 环境 +- ONNX Runtime 版本验证 +- GPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-gpu:1.24.3 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.3 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### GPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CUDA Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.3 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子,支持 GPU) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子,CPU only) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **GPU**: NVIDIA GPU with CUDA 12.x support +- **显存**: 建议 4GB+ +- **Docker**: 19.03+ with nvidia-docker2 + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CUDA Execution Provider](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.3-gpu/build.conf b/frameworks/onnxruntime/1.24.3-gpu/build.conf new file mode 100644 index 0000000..dd6b2d9 --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-gpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.3 GPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-gpu +IMAGE_TAG=1.24.3 +GPU_TEST=true diff --git a/frameworks/onnxruntime/1.24.3-gpu/test.sh b/frameworks/onnxruntime/1.24.3-gpu/test.sh new file mode 100755 index 0000000..6eee19d --- /dev/null +++ 
b/frameworks/onnxruntime/1.24.3-gpu/test.sh @@ -0,0 +1,243 @@ +#!/bin/bash +# NOTE: no "set -e" — each test checks $? explicitly and prints diagnostics before exiting + +IMAGE="${1:-oc9-onnxruntime-gpu:1.24.3}" + +echo "==========================================" +echo "测试 ONNX Runtime 1.24.3 GPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 和 CUDA 环境 +echo -n "测试 1: Python 和 CUDA 环境... " +docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.3" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.3, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... 
" + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.4-cpu/Dockerfile b/frameworks/onnxruntime/1.24.4-cpu/Dockerfile new file mode 100644 index 0000000..5b8b985 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.4 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.4 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.4 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.4-cpu/README.md b/frameworks/onnxruntime/1.24.4-cpu/README.md new file mode 100644 index 0000000..b1495f1 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.4 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.4 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.4 . 
+``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.4 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.4 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.4 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.4 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.4-cpu/build.conf b/frameworks/onnxruntime/1.24.4-cpu/build.conf new file mode 100644 index 0000000..f86e191 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.4 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.4 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.4-cpu/test.sh b/frameworks/onnxruntime/1.24.4-cpu/test.sh new file mode 100755 index 0000000..aa82543 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/test.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# NOTE: no "set -e" — each test checks $? explicitly and prints diagnostics before exiting + +IMAGE="${1:-oc9-onnxruntime-cpu:1.24.4}" + +echo "==========================================" +echo "测试 ONNX 
Runtime 1.24.4 CPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 环境 +echo -n "测试 1: Python 环境... " +docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 --version + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.4" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.4, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: CPU 推理测试 +echo -n "测试 5: CPU 推理测试... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 创建简单的 ONNX 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +# 使用 CPU Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 +fi + +# 测试 6: ONNX ML 域支持 +echo -n "测试 6: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" -- Gitee From a755ad5f58e0541a09ec221fa51543b497c9aa50 Mon Sep 17 00:00:00 2001 From: Anstarc Date: Tue, 5 May 2026 16:16:43 +0800 Subject: [PATCH 3/4] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=20GPU=20=E7=89=88?= =?UTF-8?q?=E6=9C=AC=20nvcc=20=E8=B7=AF=E5=BE=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frameworks/onnxruntime/1.24.1-gpu/test.sh | 4 ++-- frameworks/onnxruntime/1.24.2-gpu/test.sh | 4 ++-- frameworks/onnxruntime/1.24.3-gpu/test.sh | 4 ++-- frameworks/onnxruntime/1.24.4-gpu/test.sh | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frameworks/onnxruntime/1.24.1-gpu/test.sh b/frameworks/onnxruntime/1.24.1-gpu/test.sh index fb409a9..e49ac5e 100755 --- a/frameworks/onnxruntime/1.24.1-gpu/test.sh +++ b/frameworks/onnxruntime/1.24.1-gpu/test.sh @@ -10,12 +10,12 @@ echo "==========================================" # 测试 1: Python 和 CUDA 环境 echo -n "测试 1: Python 和 CUDA 环境... " -docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" > /dev/null 2>&1 if [ $? -eq 0 ]; then echo "✓" else echo "✗" - docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" exit 1 fi diff --git a/frameworks/onnxruntime/1.24.2-gpu/test.sh b/frameworks/onnxruntime/1.24.2-gpu/test.sh index 4613042..5bb7e83 100755 --- a/frameworks/onnxruntime/1.24.2-gpu/test.sh +++ b/frameworks/onnxruntime/1.24.2-gpu/test.sh @@ -10,12 +10,12 @@ echo "==========================================" # 测试 1: Python 和 CUDA 环境 echo -n "测试 1: Python 和 CUDA 环境... 
" -docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" > /dev/null 2>&1 if [ $? -eq 0 ]; then echo "✓" else echo "✗" - docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" exit 1 fi diff --git a/frameworks/onnxruntime/1.24.3-gpu/test.sh b/frameworks/onnxruntime/1.24.3-gpu/test.sh index 6eee19d..6d26648 100755 --- a/frameworks/onnxruntime/1.24.3-gpu/test.sh +++ b/frameworks/onnxruntime/1.24.3-gpu/test.sh @@ -10,12 +10,12 @@ echo "==========================================" # 测试 1: Python 和 CUDA 环境 echo -n "测试 1: Python 和 CUDA 环境... " -docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" > /dev/null 2>&1 if [ $? -eq 0 ]; then echo "✓" else echo "✗" - docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" exit 1 fi diff --git a/frameworks/onnxruntime/1.24.4-gpu/test.sh b/frameworks/onnxruntime/1.24.4-gpu/test.sh index 78e90cd..de01cd1 100755 --- a/frameworks/onnxruntime/1.24.4-gpu/test.sh +++ b/frameworks/onnxruntime/1.24.4-gpu/test.sh @@ -10,12 +10,12 @@ echo "==========================================" # 测试 1: Python 和 CUDA 环境 echo -n "测试 1: Python 和 CUDA 环境... " -docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" > /dev/null 2>&1 +docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" > /dev/null 2>&1 if [ $? 
-eq 0 ]; then echo "✓" else echo "✗" - docker run --rm "$IMAGE" bash -c "python3.11 --version && nvcc --version" + docker run --rm "$IMAGE" bash -c "python3.11 --version && /usr/local/cuda/bin/nvcc --version" exit 1 fi -- Gitee From df3b4b19dc500795c650785a23fa58df121f3a27 Mon Sep 17 00:00:00 2001 From: Anstarc Date: Tue, 5 May 2026 16:28:53 +0800 Subject: [PATCH 4/4] add torch --- frameworks/onnxruntime/1.24.1-gpu/Dockerfile | 3 +++ frameworks/onnxruntime/1.24.2-gpu/Dockerfile | 3 +++ frameworks/onnxruntime/1.24.3-gpu/Dockerfile | 3 +++ frameworks/onnxruntime/1.24.4-gpu/Dockerfile | 3 +++ 4 files changed, 12 insertions(+) diff --git a/frameworks/onnxruntime/1.24.1-gpu/Dockerfile b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile index e7a6cdf..34f02fa 100644 --- a/frameworks/onnxruntime/1.24.1-gpu/Dockerfile +++ b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile @@ -24,6 +24,9 @@ RUN pip3.11 install --no-cache-dir \ numpy \ protobuf +RUN pip3.11 install --no-cache-dir \ + torch torchvision --index-url https://download.pytorch.org/whl/cu128 + # 设置 GPU 环境变量 ENV NVIDIA_VISIBLE_DEVICES=all ENV CUDA_MODULE_LOADING=LAZY diff --git a/frameworks/onnxruntime/1.24.2-gpu/Dockerfile b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile index e404c1e..a3e220b 100644 --- a/frameworks/onnxruntime/1.24.2-gpu/Dockerfile +++ b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile @@ -24,6 +24,9 @@ RUN pip3.11 install --no-cache-dir \ numpy \ protobuf +RUN pip3.11 install --no-cache-dir \ + torch torchvision --index-url https://download.pytorch.org/whl/cu128 + # 设置 GPU 环境变量 ENV NVIDIA_VISIBLE_DEVICES=all ENV CUDA_MODULE_LOADING=LAZY diff --git a/frameworks/onnxruntime/1.24.3-gpu/Dockerfile b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile index cb370e4..2da1827 100644 --- a/frameworks/onnxruntime/1.24.3-gpu/Dockerfile +++ b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile @@ -24,6 +24,9 @@ RUN pip3.11 install --no-cache-dir \ numpy \ protobuf +RUN pip3.11 install --no-cache-dir \ + torch torchvision --index-url 
https://download.pytorch.org/whl/cu128 + # 设置 GPU 环境变量 ENV NVIDIA_VISIBLE_DEVICES=all ENV CUDA_MODULE_LOADING=LAZY diff --git a/frameworks/onnxruntime/1.24.4-gpu/Dockerfile b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile index 79dde7f..8cfcece 100644 --- a/frameworks/onnxruntime/1.24.4-gpu/Dockerfile +++ b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile @@ -24,6 +24,9 @@ RUN pip3.11 install --no-cache-dir \ numpy \ protobuf +RUN pip3.11 install --no-cache-dir \ + torch torchvision --index-url https://download.pytorch.org/whl/cu128 + # 设置 GPU 环境变量 ENV NVIDIA_VISIBLE_DEVICES=all ENV CUDA_MODULE_LOADING=LAZY -- Gitee