henry000 committed on
Commit
853bfba
·
1 Parent(s): 018c1a0

🚀 [Refactor] Docker structure, move to docker dir

Dockerfile.gpu DELETED
@@ -1,129 +0,0 @@
- FROM nvcr.io/nvidia/tensorrt:24.02-py3
-
- ENV DEBIAN_FRONTEND=noninteractive
- ARG USERNAME=user
- ARG WORKDIR=/home/${USERNAME}/workdir
- ARG PYCUDAVER=2022.2.2
- ARG TORCHVER=2.1.0
- ARG TORCHVISIONVER=0.16.0
- ARG ONNXVER=1.16.1
- ARG ONNXRUNTIMEVER=1.18.0
- ARG ONNXSIMVER=0.4.30
- ARG H5PYVER=3.11.0
- ARG PSUTILVER=5.9.8
- ARG CMAKEVER=3.29.3
- ARG FLATBUFFERSVER=23.5.26
- ARG PACKAGINGVER=24.0
- ARG WHEELVER=0.43.0
-
- SHELL ["/bin/bash", "-c"]
-
- COPY requirements.txt /requirements.txt
-
- ENV CUDA_HOME=/usr/local/cuda
- ENV PATH=${PATH}:${CUDA_HOME}/bin
- ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CUDA_HOME}/lib64
-
- RUN apt-get update \
-     && apt-get install -y \
-         sudo \
-         curl \
-         gcc \
-         git \
-         make \
-         wget \
-         zlib1g \
-         protobuf-compiler \
-         libgl1-mesa-dev \
-         graphviz \
-         python-is-python3 \
-     && apt clean \
-     && rm -rf /var/lib/apt/lists/*
-
- # Make user
- RUN echo "root:root" | chpasswd \
-     && useradd \
-         --create-home \
-         --home-dir /home/${USERNAME} \
-         --shell /bin/bash \
-         --user-group \
-         --groups adm,sudo \
-         ${USERNAME} \
-     && echo "${USERNAME}:${USERNAME}" | chpasswd \
-     && cat /dev/null > /etc/sudoers.d/${USERNAME} \
-     && echo "%${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> \
-         /etc/sudoers.d/${USERNAME} \
-     && mkdir -p ${WORKDIR} \
-     && chown ${USERNAME}:${USERNAME} ${WORKDIR}
-
- USER ${USERNAME}
- WORKDIR ${WORKDIR}
-
- # Install Torch
- RUN pip install \
-     --index-url https://download.pytorch.org/whl/cu121 \
-     torch==${TORCHVER} \
-     torchvision==${TORCHVISIONVER}
-
- # Install other pip packages
- RUN pip install \
-     psutil==${PSUTILVER} \
-     onnx==${ONNXVER} \
-     pycuda==${PYCUDAVER} \
-     onnxsim==${ONNXSIMVER} \
-     h5py==${H5PYVER} \
-     flatbuffers==${FLATBUFFERSVER} \
-     cmake==${CMAKEVER} \
-     packaging==${PACKAGINGVER} \
-     wheel==${WHEELVER} \
-     && sudo rm /usr/local/bin/cmake
-
- # Install onnx-tensorrt
- RUN git clone -b release/8.6-GA --recursive https://github.com/onnx/onnx-tensorrt ../onnx-tensorrt \
-     && export PATH=${PATH}:${HOME}/.local/bin \
-     && pushd ../onnx-tensorrt \
-     && mkdir -p build \
-     && pushd build \
-     && cmake .. -DTENSORRT_ROOT=/usr/src/tensorrt \
-     && make -j$(nproc) \
-     && sudo make install \
-     && popd \
-     && popd \
-     && echo 'pushd ../onnx-tensorrt > /dev/null' >> ~/.bashrc \
-     # At docker build time, setup.py fails because NVIDIA's physical GPU device cannot be detected.
-     # Therefore, a workaround is applied to configure setup.py to run on first access.
-     && echo 'python setup.py install --user 1>/dev/null 2>/dev/null' >> ~/.bashrc \
-     && echo 'popd > /dev/null' >> ~/.bashrc \
-     && echo 'export CUDA_MODULE_LOADING=LAZY' >> ~/.bashrc \
-     && echo 'export PATH=${PATH}:/usr/src/tensorrt/bin:${HOME}/onnx-tensorrt/build' >> ~/.bashrc
-
- # Build onnxruntime-gpu / Install onnxruntime-gpu
- RUN git clone -b v${ONNXRUNTIMEVER} https://github.com/microsoft/onnxruntime.git \
-     && pushd onnxruntime \
-     && export PATH=${PATH}:${HOME}/.local/bin \
-     && sudo chmod +x build.sh \
-     && ./build.sh \
-         --config Release \
-         --cudnn_home /usr/lib/x86_64-linux-gnu/ \
-         --cuda_home /usr/local/cuda \
-         --use_tensorrt \
-         --use_cuda \
-         --tensorrt_home /usr/src/tensorrt/ \
-         --enable_pybind \
-         --build_wheel \
-         --parallel $(nproc) \
-         --skip_tests \
-     && pip install nvidia-pyindex \
-     && pip install onnx-graphsurgeon \
-     && pip install simple_onnx_processing_tools \
-     && pip uninstall onnxruntime onnxruntime-gpu \
-     && pip install --user build/Linux/Release/dist/onnxruntime_gpu-${ONNXRUNTIMEVER}-cp310-cp310-linux_x86_64.whl \
-     && popd \
-     && rm -rf onnxruntime
-
- # Install requirements
- RUN pip install -r /requirements.txt
-
- # Setting pip package path
- RUN echo 'export PATH=${PATH}:${HOME}/.local/bin' >> ~/.bashrc
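For reference, the removed GPU image was built while Dockerfile.gpu still sat at the repository root; a minimal sketch of how it would have been built and run is below (the yolo-gpu tag is an assumption, and --gpus all requires the NVIDIA Container Toolkit on the host):

# Build the old GPU image from the repository root (tag is illustrative, not from the commit)
docker build -f Dockerfile.gpu -t yolo-gpu .
# Run with GPU access; the onnx-tensorrt setup.py workaround in ~/.bashrc runs on the first interactive shell
docker run --rm -it --gpus all yolo-gpu bash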
.dockerignore → docker/.dockerignore RENAMED
File without changes
Dockerfile.cpu → docker/Dockerfile RENAMED
@@ -1,23 +1,11 @@
- FROM ubuntu:22.04
+ ARG BASE_IMG=nvidia/cuda:12.4.1-devel-ubuntu22.04
+ FROM ${BASE_IMG}

- ENV DEBIAN_FRONTEND=noninteractive
  ARG USERNAME=user
- ARG WORKDIR=/home/${USERNAME}/workdir
- ARG TORCHVER=2.1.0
- ARG TORCHVISIONVER=0.16.0
- ARG ONNXVER=1.16.1
- ARG ONNXRUNTIMEVER=1.18.0
- ARG ONNXSIMVER=0.4.30
- ARG H5PYVER=3.11.0
- ARG PSUTILVER=5.9.8
- ARG CMAKEVER=3.29.3
- ARG FLATBUFFERSVER=23.5.26
- ARG PACKAGINGVER=24.0
- ARG WHEELVER=0.43.0
+ ARG WORKDIR=/home/${USERNAME}/YOLO

  SHELL ["/bin/bash", "-c"]

- COPY requirements.txt /requirements.txt
-
  ENV CUDA_HOME=/usr/local/cuda
  ENV PATH=${PATH}:${CUDA_HOME}/bin
@@ -59,26 +47,18 @@ RUN echo "root:root" | chpasswd \
  USER ${USERNAME}
  WORKDIR ${WORKDIR}

- # Install Torch
- RUN pip install \
-     --index-url https://download.pytorch.org/whl/cpu \
-     torch==${TORCHVER} \
-     torchvision==${TORCHVISIONVER}
-
- # Install other pip packages
- RUN pip install \
-     psutil==${PSUTILVER} \
-     onnx==${ONNXVER} \
-     onnxruntime==${ONNXRUNTIMEVER} \
-     onnxsim==${ONNXSIMVER} \
-     h5py==${H5PYVER} \
-     flatbuffers==${FLATBUFFERSVER} \
-     cmake==${CMAKEVER} \
-     packaging==${PACKAGINGVER} \
-     wheel==${WHEELVER}
-
- # Install requirements
- RUN pip install -r /requirements.txt
-
- # Setting pip package path
+ COPY docker/requirements-locked.txt /app/requirements-locked.txt
+ COPY requirements.txt /app/requirements.txt
+
+ # Install any needed packages specified in requirements.txt
+ RUN pip install --no-cache-dir -r /app/requirements-locked.txt
+ RUN pip install --no-cache-dir -r /app/requirements.txt
+
+ RUN git clone https://github.com/WongKinYiu/YOLO.git .
+
+ # Ensure pip-installed packages are available in the PATH
  RUN echo 'export PATH=${PATH}:${HOME}/.local/bin' >> ~/.bashrc
+
+ # Optional: Source .bashrc to apply changes in the current session
+ RUN source ~/.bashrc
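After the rename, the build context is presumably still the repository root, since the Dockerfile still COPYs requirements.txt from there. A sketch of the build is below; the yolo tag and the alternative runtime base image are assumptions, and BASE_IMG can be overridden because it is declared before FROM:

# Build from the repository root so COPY requirements.txt resolves (tag is illustrative)
docker build -f docker/Dockerfile -t yolo .
# Optionally swap the CUDA base image through the BASE_IMG build argument
docker build -f docker/Dockerfile --build-arg BASE_IMG=nvidia/cuda:12.4.1-runtime-ubuntu22.04 -t yolo .
# Run with GPU access
docker run --rm -it --gpus all yolo bash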
docker/requirements-locked.txt ADDED
@@ -0,0 +1,5 @@
+ pycocotools==2.0.7
+ torch==2.2.1
+ torchvision==0.17.1
+ setuptools>=60.0
+ numpy==1.23.5
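Outside Docker, the same pins could be applied to a local environment by installing the locked file before the project requirements, mirroring the order used in docker/Dockerfile; a minimal sketch:

# Install the pinned base packages first, then the project requirements
pip install --no-cache-dir -r docker/requirements-locked.txt
pip install --no-cache-dir -r requirements.txt
# Spot-check that the pinned versions were picked up
python -c "import torch, torchvision, numpy; print(torch.__version__, torchvision.__version__, numpy.__version__)"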