TVM is a deep learning compiler stack for CPUs, GPUs, and accelerators. This repository presents some tips for using TVM to deploy neural network models.
pip3 install apache-tvm
or pip3 install apache-tvm-cu102 -f https://tlcpack.ai/wheels for a CUDA 10.2 build
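To confirm the wheel installed correctly (a quick optional check; tvm.__version__ is part of the installed package):
python3 -c "import tvm; print(tvm.__version__)"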
sudo lshw -C display
or lspci | grep -i --color 'vga\|3d\|2d'
sudo apt install ocl-icd-opencl-dev
sudo apt install clinfo
sudo apt install -y llvm
llvm-config --version to check your version
sudo apt update
sudo apt install -y python3 python3-dev python3-setuptools gcc libtinfo-dev zlib1g-dev build-essential cmake vim git
wget https://github.com/oneapi-src/oneDNN/archive/refs/tags/v2.6.tar.gz
tar xf v2.6.tar.gz
cd oneDNN-2.6/
cmake . -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib
make -j4
sudo make install
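To confirm oneDNN is visible to the dynamic loader after installing to /usr (an optional check):
sudo ldconfig
ldconfig -p | grep dnnl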
sudo apt install -y python3-pip
pip3 install --upgrade pip
pip3 install numpy decorator attrs
pip3 install pillow tensorflow tflite opencv-python easydict typing-extensions psutil scipy tornado cloudpickle
pip3 install onnx onnxoptimizer
pip3 install onnxruntime for CPU
or pip3 install onnxruntime-gpu for CUDA
pip3 list
or pip3 freeze
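To check which execution providers onnxruntime can use (CPUExecutionProvider is always present; CUDAExecutionProvider appears only with the GPU build and a working CUDA setup):
python3 -c "import onnxruntime; print(onnxruntime.get_available_providers())"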
pip3 freeze > requirements.txt
tar zxvf the downloaded *.tar.gz
cd ~
git clone --recursive https://github.com/apache/tvm.git
cd tvm
mkdir build
cp cmake/config.cmake build
cd build
vi config.cmake
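Typical flags to switch on in config.cmake for this setup; the exact option names can vary between TVM releases, so check the comments in your copy of the file:
set(USE_LLVM ON)
set(USE_OPENCL ON)
set(USE_DNNL ON)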
cmake ..
make -j4
vi ~/.bashrc
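Lines to add to ~/.bashrc, assuming TVM was cloned into the home directory as above:
export TVM_HOME=~/tvm
export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH}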
source ~/.bashrc
python3 -c "import tvm"
python3 compile_run_mobilenetv2.py
Prediction=> id: 282 name: tabby
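For reference, a minimal sketch of what a script like compile_run_mobilenetv2.py can do, assuming an ONNX MobileNetV2 file named mobilenetv2.onnx with an input tensor called "input"; the real script, model path, input name, and preprocessing may differ:

import numpy as np
import onnx
import tvm
from tvm import relay
from tvm.contrib import graph_executor

onnx_model = onnx.load("mobilenetv2.onnx")               # assumed model file name
shape_dict = {"input": (1, 3, 224, 224)}                 # assumed input tensor name and shape
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)

target = "llvm"                                          # CPU; use "opencl" or "cuda" for a GPU build
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)

dev = tvm.device(target, 0)
module = graph_executor.GraphModule(lib["default"](dev))
data = np.random.rand(1, 3, 224, 224).astype("float32")  # replace with a preprocessed image
module.set_input("input", data)
module.run()
scores = module.get_output(0).numpy()
print("Prediction=> id:", scores.argmax())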
print(lib.get_params())                                  # compiled weights
print(lib.get_lib().imported_modules)                    # device modules imported into the host module
print(lib.get_lib().imported_modules[0].get_source())    # generated source of the first imported module
print(lib.get_lib().imported_modules[1].get_source())    # generated source of the second imported module
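The compiled artifact can also be saved and reloaded for deployment; export_library and load_module are part of the TVM runtime API, and the file name here is just an example:
lib.export_library("mobilenetv2_deploy.so")
loaded = tvm.runtime.load_module("mobilenetv2_deploy.so")
module = graph_executor.GraphModule(loaded["default"](dev))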
sudo apt install apt-file
sudo apt update
apt-file find libOpenCL.so
sudo add-apt-repository ppa:intel-opencl/intel-opencl
sudo apt update
sudo apt install intel-opencl-icd
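After installing the ICD, clinfo (installed earlier) should list the Intel GPU as an OpenCL device:
clinfo | grep -i 'device name'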