Skip to content

Instantly share code, notes, and snippets.

@bio-punk
Last active January 29, 2026 18:50
Show Gist options
  • Select an option

  • Save bio-punk/392e80de53eb16ecef3eedddf9eccb4c to your computer and use it in GitHub Desktop.

Select an option

Save bio-punk/392e80de53eb16ecef3eedddf9eccb4c to your computer and use it in GitHub Desktop.
lammps voro++ deepmd #deepmd #lammps #build #x86 #container
#!/bin/bash
#SBATCH --gpus=4
#SBATCH
# Build an Apptainer SIF image from a locally uploaded docker archive.
# Run on a compute node: the conversion needs a lot of memory.
cd /data/run01/scv6266/dev260127 || exit 1
mkdir -p tmp
# Redirect temp files: the image conversion can overflow the default /tmp.
export TMPDIR=/data/run01/scv6266/dev260127/tmp
module load apptainer/1.2.4
# Convert the docker archive (from `docker save`) into an Apptainer image.
apptainer -v build --fakeroot pt_23.07.sif docker-archive://pt_23.07.tar
#!/bin/bash
source ~/.bashrc
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
# --- conda environment setup ---
source /data/apps/miniforge/24.1.2/etc/profile.d/conda.sh
conda create -p "${ALL_PREFIX}/${CONDA_ENV_NAME}" \
    -c conda-forge \
    "python=${PYTHON_VER}" \
    -y || exit 1
conda activate "${ALL_PREFIX}/${CONDA_ENV_NAME}" || exit 1
# --- PyTorch wheel ---
# wget https://mirrors.aliyun.com/pytorch-wheels/cu118/torch-2.7.1+cu118-cp39-cp39-manylinux_2_28_x86_64.whl
TORCH_WHL=torch-2.5.1+cu121-cp310-cp310-linux_x86_64.whl
# Skip the download when the wheel has already been cached locally.
if [ ! -f "${TORCH_WHL}" ]; then
    wget "https://mirrors.aliyun.com/pytorch-wheels/cu121/${TORCH_WHL}" || exit 1
fi
# --- pip packages ---
export PIP_INDEX_URL=https://mirrors.bfsu.edu.cn/pypi/web/simple
pip install \
    "numpy<2" \
    torchaudio torchvision \
    "${TORCH_WHL}" \
    tensorflow==2.19
#!/bin/bash
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
CLIENT_NODE=ln02
INSTALL_PREFIX=${ALL_PREFIX}/install
# Custom bashrc (provides libtorch / install-tree environment variables).
source ~/.bashrc
# Reverse proxy: compute nodes reach the internet through the login node.
export https_proxy=http://127.0.0.1:7897
export http_proxy=http://127.0.0.1:7897
# Sanity-check that the home directory is mounted.
ls -lah ~
# Network connectivity test through the proxy.
if wget https://www.baidu.com; then
    echo "Network test succeeded"
    rm -f index.html
else
    echo "Network test failed"
    exit 1
fi
# Enter the conda environment.
if ! conda activate "${ALL_PREFIX}/${CONDA_ENV_NAME}"; then
    echo "Activate conda environment failed"
    exit 1
fi
# Work from the deepmd-kit source tree.
DEEPMD_SRC=${ALL_PREFIX}/deepmd-kit_tf
cd "${DEEPMD_SRC}" || exit 1
# Install the deepmd-kit Python interface.
#
# Builds and pip-installs the Python package from the current
# directory with the following configuration:
# - DP_VARIANT=cuda: build with CUDA support
# - CUDAToolkit_ROOT=$CUDA_HOME: path of the CUDA toolkit
# - DP_ENABLE_TENSORFLOW=1: enable the TensorFlow backend
# - DP_ENABLE_PYTORCH=1: enable the PyTorch backend
# - DP_ENABLE_NATIVE_OPTIMIZATION=1: enable native optimization
# - "numpy<2": keep numpy below major version 2
#
# Exits with code 1 if the build fails; otherwise prints a
# success message.
install_py_interface() {
    if DP_VARIANT=cuda \
        CUDAToolkit_ROOT=$CUDA_HOME \
        DP_ENABLE_TENSORFLOW=1 \
        DP_ENABLE_PYTORCH=1 \
        DP_ENABLE_NATIVE_OPTIMIZATION=1 \
        pip install -v . "numpy<2"; then
        echo "Build Python interface succeeded"
    else
        echo "Build failed"
        exit 1
    fi
}
# Only (re)build when this exact dev version is not already installed.
if ! pip list | grep deepmd-kit | grep -q 0.1.dev3160+gf4fec1db7; then
    install_py_interface
fi
# Build and install the C++ interface of deepmd-kit.
#
# Configures CMake with the following options:
# - CMAKE_BUILD_TYPE=Debug: build with debug information
# - CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}: installation prefix
# - CMAKE_CXX_STANDARD(_REQUIRED): require C++17
# - CMAKE_CUDA_STANDARD(_REQUIRED): require CUDA C++17
# - CMAKE_CUDA_COMPILER / MPI_CXX_COMPILER / CMAKE_CXX_COMPILER:
#   pin the CUDA, MPI and C++ compilers
# - ENABLE_TENSORFLOW=ON / ENABLE_PYTORCH=ON: both backends
# - USE_CUDA_TOOLKIT=ON, CUDAToolkit_ROOT=${CUDA_HOME}: CUDA toolkit
# - USE_TF_PYTHON_LIBS=ON: TensorFlow libs come from the Python env
# - USE_PT_PYTHON_LIBS=OFF, PYTORCH_ROOT=${LIBTORCH_HOME}: PyTorch
#   libs come from libtorch (the pip wheel lacks the cxx11 ABI)
# - ENABLE_NATIVE_OPTIMIZATION=1: native optimization
# - CMAKE_CUDA_ARCHITECTURES=70: V100-only device code
#
# Exits with code 1 on failure; runs `make install` on success.
install_cxx_interface() {
    cd "$DEEPMD_SRC/source" || exit 1
    # Always start from a clean build tree.
    if [ -d build ]; then
        rm -rf build
    fi
    mkdir -p build
    cd build || exit 1
    # Drop conda's CMAKE_PREFIX_PATH so its bundled CUDA is not picked up.
    unset CMAKE_PREFIX_PATH
    export CMAKE_PREFIX_PATH=${LIBTORCH_HOME}
    export CUDA_ROOT=${CUDA_HOME}
    export CC=$(which gcc)
    export CXX=$(which g++)
    # Only generate V100 (sm_70) device code to speed up compilation.
    export TORCH_CUDA_ARCH_LIST="7.0"
    cmake \
    -D CMAKE_BUILD_TYPE=Debug \
    -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
    -D CMAKE_CXX_STANDARD=17 \
    -D CMAKE_CXX_STANDARD_REQUIRED=ON \
    -D CMAKE_CUDA_STANDARD=17 \
    -D CMAKE_CUDA_STANDARD_REQUIRED=ON \
    -D CMAKE_CUDA_COMPILER=$(which nvcc) \
    -D MPI_CXX_COMPILER=$(which mpicxx) \
    -D CMAKE_CXX_COMPILER=$CXX \
    -D ENABLE_TENSORFLOW=ON \
    -D ENABLE_PYTORCH=ON \
    -D USE_CUDA_TOOLKIT=ON \
    -D CUDAToolkit_ROOT=${CUDA_HOME} \
    -D USE_TF_PYTHON_LIBS=ON \
    -D USE_PT_PYTHON_LIBS=OFF \
    -D PYTORCH_ROOT=${LIBTORCH_HOME} \
    -D ENABLE_NATIVE_OPTIMIZATION=1 \
    -D CMAKE_CUDA_ARCHITECTURES=70 \
    .. || exit 1
    # FIX: was `make verbose=1` — CMake-generated makefiles only honor the
    # uppercase VERBOSE variable (cf. the LAMMPS build which uses VERBOSE=1).
    make VERBOSE=1 -j8
    if [ $? -ne 0 ]; then
        echo "Build failed"
        # Exit on failure for consistency with install_py_interface.
        exit 1
    else
        echo "Build succeeded"
        make install
    fi
}
install_cxx_interface
#!/bin/bash
#SBATCH --gpus=1
#SBATCH
# Submit-side wrapper: runs the deepmd build inside the container on a
# compute node (deepmd probes the CUDA runtime during compilation).
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
CLIENT_NODE=ln02
# Reverse proxy: tunnel port 7897 back to the login node so pip can
# reach the internet from the compute node.
ssh -CfNg -L 7897:127.0.0.1:7897 "${CLIENT_NODE}" || exit 1
# Load modules.
module load apptainer/1.2.4
# Run the build script inside the container.
cd ~ || exit 1
apptainer -v exec \
    --nv --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B "${ALL_PREFIX}/files/bashrc:/root/.bashrc" \
    -B "${ALL_PREFIX}/files/condarc:/root/.condarc" \
    -B "${ALL_PREFIX}/tmp:/tmp" \
    "${ALL_PREFIX}/sif/dev260128.sif" \
    bash "${ALL_PREFIX}/02_1_build_deepmd.sh"
#!/bin/bash
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
DEEPMD_SRC=${ALL_PREFIX}/deepmd-kit_tf
LAMMPS_SRC=${ALL_PREFIX}/lammps_23Jun2022_NVT_sep
# Register DeePMD-kit's built-in-mode module with LAMMPS (idempotent).
# -F: the include() line contains regex metacharacters ('(', ')', '.'),
# so match it as a fixed string; -q: we only need the exit status.
if ! grep -qF "include(${DEEPMD_SRC}/source/lmp/builtin.cmake)" "${LAMMPS_SRC}/cmake/CMakeLists.txt"; then
    echo "include(${DEEPMD_SRC}/source/lmp/builtin.cmake)" >> "${LAMMPS_SRC}/cmake/CMakeLists.txt"
    echo "Set CMakeLists.txt of lammps succeeded"
else
    echo "No need to set CMakeLists.txt of lammps"
fi
#!/bin/bash
source ~/.bashrc
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
DEEPMD_SRC=${ALL_PREFIX}/deepmd-kit_tf
LAMMPS_SRC=${ALL_PREFIX}/lammps_23Jun2022_NVT_sep
INSTALL_PREFIX=${ALL_PREFIX}/install
# Optional local cache server for cmake downloads:
# cd $ALL_PREFIX/pkgs
# python -m http.server 8888 1>$ALL_PREFIX/tmp/pkgs_${SLURM_JOB_ID}.log 2>&1 &
# Enter the conda environment.
if ! conda activate "${ALL_PREFIX}/${CONDA_ENV_NAME}"; then
    echo "Activate conda environment failed"
    exit 1
fi
# Only generate V100 (sm_70) device code to speed up compilation.
export TORCH_CUDA_ARCH_LIST="7.0"
# Pre-step: wire deepmd's builtin.cmake into the LAMMPS CMakeLists.
bash "${ALL_PREFIX}/03_1_build_lammps.sh"
# Clean intermediates from previous makefile-based builds; stale objects
# and binaries make the cmake build fail.
cd "$LAMMPS_SRC/src" || exit 1
make -C "${LAMMPS_SRC}/src" purge
make -C "${LAMMPS_SRC}/src" no-all
make -C "${LAMMPS_SRC}/src" clean-all
rm -rf Obj_* *.o *.a liblammps.so lmp_mpi lmp_serial
export CPATH=${INSTALL_PREFIX}/include/voro++:$CPATH
# Configure and build LAMMPS in a per-job build directory.
cd "$LAMMPS_SRC" || exit 1
mkdir -p "build_${SLURM_JOB_ID}"
cd "build_${SLURM_JOB_ID}" || exit 1
# Drop conda's CMAKE_PREFIX_PATH so its bundled CUDA is not picked up.
unset CMAKE_PREFIX_PATH
cmake \
    -D CMAKE_BUILD_TYPE=Debug \
    -D CMAKE_CXX_STANDARD=17 \
    -D CMAKE_CUDA_STANDARD=17 \
    -D CMAKE_CXX_COMPILER=$LAMMPS_SRC/lib/kokkos/bin/nvcc_wrapper \
    -D BUILD_MPI=ON \
    -D PKG_KOKKOS=ON \
    -D PKG_GPU=ON \
    -D CUDA_ARCH_LIST=7.0 \
    -D CUDAToolkit_ROOT=$CUDA_HOME \
    -D CUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME \
    -D FFT=KISS \
    -D GPU_API=cuda \
    -D CMAKE_CUDA_ARCHITECTURES="70" \
    -D GPU_ARCH=sm_70 \
    -D Kokkos_ENABLE_CUDA=ON \
    -D Kokkos_ARCH_VOLTA70=ON \
    -D Kokkos_ENABLE_CUDA_LAMBDA=ON \
    -D MKL_INCLUDE_DIR="$CONDA_PREFIX/include" \
    -D CMAKE_PREFIX_PATH="$LIBTORCH_HOME;$CUDA_HOME;${INSTALL_PREFIX}" \
    -D CMAKE_LIBRARY_PATH=$CUDA_HOME/lib64/stubs \
    -D CMAKE_MPI_C_COMPILER=mpicc \
    -D CMAKE_MPI_CXX_COMPILER=mpicxx \
    -D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
    -D BUILD_SHARED_LIBS=yes \
    -D PKG_VORONOI=yes \
    -D DOWNLOAD_VORO=no \
    -D VORO_INCLUDE_DIR=${INSTALL_PREFIX}/include/voro++ \
    -D VORO_LIBRARY=${INSTALL_PREFIX}/lib/libvoro++.a \
    -D PKG_MEAM=yes \
    -D PKG_REAXFF=yes \
    "${LAMMPS_SRC}/cmake" || exit 1
make -j8 VERBOSE=1
if [ $? -ne 0 ]; then
    echo "LAMMPS build failed"
    exit 1
else
    echo "LAMMPS build succeeded"
    make install
    # FIX: was "$install_prefix" (lowercase, never defined) in the original.
    echo "LAMMPS installed to ${INSTALL_PREFIX}"
fi
# Legacy make-based package selection, kept for reference:
# make \
# yes-EXTRA-FIX \
# yes-GPU \
# yes-KSPACE \
# yes-MEAM \
# yes-REAXFF \
# yes-VORONOI \
# yes-USER-DEEPMD
#!/bin/bash
#SBATCH --gpus=1
#SBATCH
# User-tunable variables.
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
CLIENT_NODE=ln02
# Load modules.
module load apptainer/1.2.4
# Launch the voro++ build inside the container. Note the last bind:
# the stock config.mk is shadowed by the patched VORONOI_config.mk
# (CFLAGS with -fPIC) via a bind mount.
cd ~
apptainer -v exec \
    --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B "${ALL_PREFIX}/files/bashrc:/root/.bashrc" \
    -B "${ALL_PREFIX}/files/condarc:/root/.condarc" \
    -B "${ALL_PREFIX}/tmp:/tmp" \
    -B "${ALL_PREFIX}/files/VORONOI_config.mk:${ALL_PREFIX}/voro++-0.4.6/config.mk" \
    "${ALL_PREFIX}/sif/dev260128.sif" \
    bash "${ALL_PREFIX}/03_4_build_voro.sh"
#!/bin/bash
source ~/.bashrc
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
CLIENT_NODE=ln02
INSTALL_PREFIX=${ALL_PREFIX}/install
VORO_SRC=${ALL_PREFIX}/voro++-0.4.6
# Enter the conda environment.
if ! conda activate "${ALL_PREFIX}/${CONDA_ENV_NAME}"; then
    echo "Activate conda environment failed"
    exit 1
fi
# Remove any previous voro++ installation.
rm -rf "${INSTALL_PREFIX}/include/voro++"
rm -f "${INSTALL_PREFIX}/lib/libvoro++.a"
# Build and install voro++ (config.mk is bind-mounted with -fPIC CFLAGS).
cd "${VORO_SRC}" || exit 1
make clean
make PREFIX="${INSTALL_PREFIX}" install || exit 1
echo "build voro++ done"
# Unpack the static archive in a scratch directory and inspect an object.
# FIX: the cd is now guarded — the original ran `rm ./*` even when the cd
# failed, which would have wiped files in the previous working directory.
mkdir -p "${INSTALL_PREFIX}/lib/voro"
cd "${INSTALL_PREFIX}/lib/voro" || exit 1
rm -f ./*
cp ../libvoro++.a .
ar x libvoro++.a
# NOTE(review): used here as an indicator that -fPIC took effect
# (PLT32 relocations in the object) — confirm on the target toolchain.
readelf -r c_loops.o | grep R_X86_64_PLT32
#!/bin/bash
#SBATCH --gpus=1
#SBATCH
# User-tunable variables.
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
PYTHON_VER=3.10
export CUDA_VERSION=12.1
# Load modules.
module load apptainer/1.2.4
# Launch the LAMMPS build inside the container. The stock Kokkos
# nvcc_wrapper is shadowed by the hand-edited one via a bind mount.
cd ~
apptainer -v exec \
    --nv --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B "${ALL_PREFIX}/files/bashrc:/root/.bashrc" \
    -B "${ALL_PREFIX}/files/condarc:/root/.condarc" \
    -B "${ALL_PREFIX}/tmp:/tmp" \
    -B "${ALL_PREFIX}/files/nvcc_wrapper:${ALL_PREFIX}/lammps_23Jun2022_NVT_sep/lib/kokkos/bin/nvcc_wrapper" \
    "${ALL_PREFIX}/sif/dev260128.sif" \
    bash "${ALL_PREFIX}/03_2_build_lammps.sh"
#!/bin/bash
# Load environment.
source ~/.bashrc
# --- user-tunable variables ---
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env
# Enter the conda environment (checked, consistent with the build scripts).
if ! conda activate "${ALL_PREFIX}/${CONDA_ENV_NAME}"; then
    echo "Activate conda environment failed"
    exit 1
fi
# Smoke-test the lammps binary.
mpirun -n 1 lmp -h || exit 1
# Run the voronoi example on one GPU through Kokkos.
cd "${ALL_PREFIX}/lammps_23Jun2022_NVT_sep/examples/voronoi" || exit 1
mpirun -n 1 lmp -k on g 1 -sf kk -in in.voronoi
#!/bin/bash
#SBATCH --gpus=1
# User-tunable variables.
ALL_PREFIX=/data/run01/$USER/dev260127
# Load modules.
module load apptainer/1.2.4
# Start the container and hand off to the compute script.
cd ~
apptainer -v exec \
    --nv --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B "${ALL_PREFIX}/files/bashrc:/root/.bashrc" \
    -B "${ALL_PREFIX}/tmp:/tmp" \
    "${ALL_PREFIX}/sif/dev260128.sif" \
    bash "${ALL_PREFIX}/10_1_run.sh"

Lammps_20220623 with deepmd

启用的lammps package

  • PKG_MEAM
  • PKG_REAXFF
  • PKG_GPU
  • PKG_VORONOI
    • DOWNLOAD_VORO=no
    • VORO_INCLUDE_DIR=${INSTALL_PREFIX}/include/voro++
    • VORO_LIBRARY=${INSTALL_PREFIX}/lib/libvoro++.a
  • PKG_KOKKOS
    • Kokkos_ENABLE_CUDA
    • Kokkos_ARCH_VOLTA70
    • Kokkos_ENABLE_CUDA_LAMBDA

注意

  1. 部分步骤依赖于网络代理
  2. 需要执行unset CMAKE_PREFIX_PATH跳过conda提供的cuda环境
  3. 建议更改/tmp位置以避免临时文件过大
  4. voro++-0.4.6默认只安装静态库,需要修改config.mk文件(在CFLAGS中加入-fPIC)以支持lammps动态库导出,这对于deepmd是必须的
  5. export TORCH_CUDA_ARCH_LIST="7.0" 手动指定目标架构,只需要编译支持V100的目标代码,可以加速编译
  6. lmp -k on g 1 -sf kk 建议使用kokkos运行
  7. 需清理makefile产生的中间文件和最终二进制来避免cmake构建出错

所需要的包

依赖

  1. 系统 glibc>=2.28 (tensorflow使用cxx11abi,所以pytorch也需要cxx11abi)
  2. cuda<=12.2 (本分区驱动最大支持cuda版本)
  3. gcc>=9
  4. openmpi
  5. cudnn
  6. nccl

选择的环境

NVIDIA container image for PyTorch, release 23.07
该环境内包含

  • Ubuntu 22.04
  • NVIDIA CUDA® 12.1.1
  • NVIDIA cuDNN 8.9.3
  • NVIDIA NCCL 2.18.3
  • OpenMPI 4.1.4+

包含安装编译需要的二进制依赖

安装步骤

0 镜像准备

依赖于

  1. apptainer 通过 environment modules 导入
  2. pt_23.07.tar 通过本地上传至集群
  3. 00_image_create.sh 由于所需内存较大,所以需要计算节点执行

在本地执行镜像拉取并上传集群

docker pull nvcr.io/nvidia/pytorch:23.07-py3
docker save -o pt_23.07.tar nvcr.io/nvidia/pytorch:23.07-py3
scp -P2222 pt_23.07.tar scv6266@<集群登录节点地址>:~/run/dev260127

在集群提交

sbatch 00_image_create.sh

1 环境准备

依赖于

  1. files/bashrc 替换容器内bashrc用于conda
  2. files/condarc 替换容器内conda源
  3. 01_env_create.sh
  4. pkgs/torch-2.5.1+cu121-cp310-cp310-linux_x86_64.whl (可选项,可以提前下好缓存)

在集群登录节点执行

ALL_PREFIX=/data/run01/$USER/dev260127
module load apptainer/1.2.4
apptainer -v exec \
    --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B ${ALL_PREFIX}/files/bashrc:/root/.bashrc \
    -B ${ALL_PREFIX}/files/condarc:/root/.condarc \
    -B ${ALL_PREFIX}/tmp:/tmp \
    ${ALL_PREFIX}/sif/dev260128.sif \
        bash ${ALL_PREFIX}/01_env_create.sh

2 deepmd构建

依赖于

  1. pkgs/libtorch-cxx11-abi-shared-with-deps-2.5.1+cu121.zip torch2.5.1不具备cxx11abi,需要使用libtorch,请将此包解压至libtorch
  2. files/bashrc 将libtorch添加到环境变量中
  3. http代理, pip安装deepmd时需要联网
  4. 02_1_build_deepmd.sh py接口和c++接口都进行构建
  5. 02_build_deepmd.sh deepmd会尝试寻找cuda运行时,因此推荐在计算节点编译

在集群提交

sbatch 02_build_deepmd.sh

3 lammps构建

依赖于

  1. pkgs/libtorch-cxx11-abi-shared-with-deps-2.5.1+cu121.zip
  2. pkgs/voro++-0.4.6.tar.gz 解压至voro++-0.4.6/使用
  3. files/bashrc 提供deepmd-kit的c++接口库, voro++的库
  4. files/nvcc_wrapper 提供编译接口,需要手动修改
    1. host_compiler="g++"
    2. default_arch="sm_70"
    3. nvcc_compiler=nvcc
  5. files/VORONOI_config.mk 需要手动修改 CFLAGS 支持lammps动态库导出
    • CFLAGS=-Wall -ansi -pedantic -O3 -fPIC
  6. 03_1_build_lammps.sh 修改lammps的cmake/CMakeLists.txt来使用built-in模式构建deepmd-kit扩展,只需要执行一次
  7. 03_2_build_lammps.sh lammps构建脚本
  8. 03_3_build_voro.sh voro++构建脚本,由此拉起apptainer
  9. 03_4_build_voro.sh voro++构建脚本,此处实际执行,会检查是否包含fPIC符号
  10. 03_build_lammps.sh 提交lammps构建脚本

登录节点构建voro++-0.4.6

bash 03_3_build_voro.sh

在集群提交lammps构建任务

sbatch 03_build_lammps.sh

4 运行

依赖于

  1. files/bashrc 允许root用户执行
  2. install deepmd/voro++/lammps的二进制
  3. libtorch
  4. python_env python环境
  5. 10_1_run.sh 计算脚本
  6. 10_run.sh 提交计算脚本

提交至计算节点

sbatch 10_run.sh

example

10_1_run.sh
#!/bin/bash

# 加载环境
source ~/.bashrc

# 自定义变量
ALL_PREFIX=/data/run01/$USER/dev260127
CONDA_ENV_NAME=python_env

# 进入conda环境
conda activate ${ALL_PREFIX}/${CONDA_ENV_NAME}

# 测试lammps
mpirun -n 1 lmp -h

# 运行测试
cd ${ALL_PREFIX}/lammps_23Jun2022_NVT_sep/examples/voronoi
mpirun -n 1 lmp -k on g 1 -sf kk -in in.voronoi
10_run.sh
#!/bin/bash
#SBATCH --gpus=1

# 自定义变量
ALL_PREFIX=/data/run01/$USER/dev260127

# 加载模块
module load apptainer/1.2.4

# 启动容器
cd ~
apptainer -v exec \
    --nv --fakeroot \
    -B /data -B /data01 -B /data02 \
    -B ${ALL_PREFIX}/files/bashrc:/root/.bashrc \
    -B ${ALL_PREFIX}/tmp:/tmp \
    ${ALL_PREFIX}/sif/dev260128.sif \
        bash ${ALL_PREFIX}/10_1_run.sh
# Conda entry point for non-interactive shells inside the container.
source /data/apps/miniforge/24.1.2/etc/profile.d/conda.sh
# Prepend the local libtorch tree to the toolchain search paths.
#
# Exports LIBTORCH_HOME and pushes its bin, include, lib and
# share/man directories onto PATH, CPATH, LD_LIBRARY_PATH,
# LIBRARY_PATH and MANPATH respectively.
export_libtorch() {
    export LIBTORCH_HOME=/data/run01/scv6266/dev260127/libtorch
    export PATH="${LIBTORCH_HOME}/bin:${PATH}"
    export CPATH="${LIBTORCH_HOME}/include:${CPATH}"
    export LD_LIBRARY_PATH="${LIBTORCH_HOME}/lib:${LD_LIBRARY_PATH}"
    export LIBRARY_PATH="${LIBTORCH_HOME}/lib:${LIBRARY_PATH}"
    export MANPATH="${LIBTORCH_HOME}/share/man:${MANPATH}"
}
export_libtorch
# Expose cicc (the CUDA intermediate code compiler) from the toolkit.
#
# Prepends ${CUDA_HOME}/nvvm subdirectories to PATH,
# LD_LIBRARY_PATH, LIBRARY_PATH and CPATH; CUDA_HOME is the prefix.
export_cicc() {
    local nvvm="${CUDA_HOME}/nvvm"
    export PATH="${nvvm}/bin:${PATH}"
    export LD_LIBRARY_PATH="${nvvm}/lib64:${LD_LIBRARY_PATH}"
    export LIBRARY_PATH="${nvvm}/lib64:${LIBRARY_PATH}"
    export CPATH="${nvvm}/include:${CPATH}"
}
export_cicc
# Expose the locally installed packages (deepmd / voro++ / lammps).
#
# Sets INSTALL_PREFIX to /data/run01/scv6266/dev260127/install and
# prepends its bin, include, lib and share/man directories onto
# PATH, CPATH, LD_LIBRARY_PATH, LIBRARY_PATH and MANPATH.
export_install() {
    INSTALL_PREFIX=/data/run01/scv6266/dev260127/install
    export PATH="${INSTALL_PREFIX}/bin:${PATH}"
    export CPATH="${INSTALL_PREFIX}/include:${CPATH}"
    export LD_LIBRARY_PATH="${INSTALL_PREFIX}/lib:${LD_LIBRARY_PATH}"
    export LIBRARY_PATH="${INSTALL_PREFIX}/lib:${LIBRARY_PATH}"
    export MANPATH="${INSTALL_PREFIX}/share/man:${MANPATH}"
}
export_install
# Allow mpirun to run as root: inside the --fakeroot container the user
# appears as root, and OpenMPI refuses to start unless both of these
# variables are set.
export OMPI_ALLOW_RUN_AS_ROOT=1
export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment