progress: common data directory

Oleg Sheynin 2024-06-04 00:57:49 +00:00
parent 1013e80286
commit 3a004e712a
6 changed files with 37 additions and 23 deletions

.gitignore
View File

@@ -8,4 +8,7 @@ fastbook
 *.jpeg
 #**/pytorch/jupyter-settings
 **/workspaces
 **/user-settings
+.Trash*
+*pkl
+data

View File

@@ -3,10 +3,12 @@
 # -------------------------------------------------------
 FROM pytorch/pytorch:latest
-# Install JupyterLab
-RUN pip install jupyterlab
-# Install additional packages
+RUN useradd -m -s /bin/bash oleg
+# Install packages
+RUN pip install jupyterlab
+RUN pip install pandas
+RUN pip install numpy
 RUN pip install plotly
 RUN pip install fastbook

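Not part of the commit: a minimal sanity check, assuming an image built from the Dockerfile above, that the packages installed here (pandas, numpy, plotly, fastbook) all import inside the container alongside torch from the base image. Run it in a notebook or with python inside the jupyter_gpu_pt container.

import importlib

# Packages installed by the Dockerfile above, plus torch from the base image.
for name in ("pandas", "numpy", "plotly", "fastbook", "torch"):
    try:
        module = importlib.import_module(name)
        print(f"{name}: {getattr(module, '__version__', 'version unknown')}")
    except ImportError as exc:
        print(f"{name}: import failed ({exc})")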
View File

@ -10,7 +10,7 @@ services:
jupyter: jupyter:
user: "oleg" user: "oleg"
build: ${ROOT_DIR} build: ${ROOT_DIR}
image: cvtt/jupyter_gpu_pt:v1.0.8 image: cvtt/jupyter_gpu_pt:v1.0.9
container_name: jupyter_gpu_pt container_name: jupyter_gpu_pt
runtime: nvidia runtime: nvidia
environment: environment:
@ -20,6 +20,7 @@ services:
- ${ROOT_DIR}/jupyter_settings/user-settings:/root/.jupyter/lab/user-settings - ${ROOT_DIR}/jupyter_settings/user-settings:/root/.jupyter/lab/user-settings
- ${ROOT_DIR}/jupyter_settings/workspaces:/root/.jupyter/lab/workspaces - ${ROOT_DIR}/jupyter_settings/workspaces:/root/.jupyter/lab/workspaces
- ${ROOT_DIR}/.cache/torch:/root/.cache/torch - ${ROOT_DIR}/.cache/torch:/root/.cache/torch
- /opt/jupyter_gpu/data:/workspace/data
ports: ports:
- "${JUPYTER_PORT}:${JUPYTER_PORT}" - "${JUPYTER_PORT}:${JUPYTER_PORT}"
shm_size: "8gb" shm_size: "8gb"

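Not part of the commit: a quick sketch, run inside the jupyter container, to confirm the new shared data mount (/opt/jupyter_gpu/data on the host, /workspace/data in the container) is visible and writable. The marker filename is hypothetical.

import pathlib

data_dir = pathlib.Path("/workspace/data")     # container side of the new bind mount
print("mounted:", data_dir.is_dir())

marker = data_dir / ".mount_check"             # hypothetical throwaway file
try:
    marker.write_text("ok\n")
    print("writable:", marker.read_text().strip() == "ok")
    marker.unlink()
except OSError as exc:
    print("not writable:", exc)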
View File

@@ -13,8 +13,9 @@ RUN useradd -m -s /bin/bash oleg
 RUN mkdir -p /workspace/tf_cache
 # Install packages
-RUN pip install tensorflow-gpu
 RUN pip install jupyterlab
+RUN pip install pandas
+RUN pip install numpy
 RUN pip install plotly
 # Install additional applications
@@ -28,4 +29,4 @@ WORKDIR /workspace
 EXPOSE ${JUPYTER_PORT}
 # Command to run JupyterLab
-CMD ["jupyter-lab", "--ip=0.0.0.0", "--port=9999", "--no-browser", "--allow-root", "--NotebookApp.token=''"]
+CMD ["sh", "-c", "jupyter lab --ip=0.0.0.0 --port=9999 --allow-root --no-browser --NotebookApp.token=''"]

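Not part of the commit: a minimal check, assuming the base image already ships a GPU-enabled TensorFlow, that dropping the explicit tensorflow-gpu install does not lose GPU support. TF_CPP_MIN_LOG_LEVEL is read when TensorFlow is imported, so it is set before the import here to silence the NUMA notices seen in the notebook output below.

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"   # must be set before importing tensorflow

import tensorflow as tf

print("TensorFlow version:", tf.__version__)
for gpu in tf.config.list_physical_devices("GPU"):
    print(" -", gpu)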
View File

@@ -10,7 +10,7 @@ services:
   jupyter:
     user: "oleg"
     build: ${ROOT_DIR}
-    image: cvtt/jupyter_gpu_tf:v1.0.2
+    image: cvtt/jupyter_gpu_tf:v1.0.4
     container_name: jupyter_gpu_tf
     runtime: nvidia
     environment:
@@ -20,6 +20,7 @@ services:
       - ${ROOT_DIR}/notebooks:/workspace
       - ${ROOT_DIR}/jupyter_settings/user-settings:/home/oleg/.jupyter/lab/user-settings
       - ${ROOT_DIR}/jupyter_settings/workspaces:/home/oleg/.jupyter/lab/workspaces
+      - /opt/jupyter_gpu/data:/workspace/data
     ports:
       - "${JUPYTER_PORT}:${JUPYTER_PORT}"
     shm_size: "8gb"

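Not part of the commit: both compose files now bind the same host directory, /opt/jupyter_gpu/data, into /workspace/data. A sketch to cross-check that: run it in each notebook server; matching digests mean both containers see the same listing. It assumes both stacks were restarted after this change.

import hashlib
import pathlib

shared = pathlib.Path("/workspace/data")
names = sorted(p.name for p in shared.iterdir())   # file names only, order-stable
digest = hashlib.sha256("\n".join(names).encode()).hexdigest()[:12]
print(len(names), "entries, listing digest:", digest)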
View File

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "id": "000d047d-3dfd-48cb-a3c3-bd1ad7c78b71",
    "metadata": {},
    "outputs": [
@@ -12,28 +12,22 @@
      "text": [
       "TensorFlow version: 2.16.1\n",
       "GPUs detected: 1\n",
-      " - /physical_device:GPU:0\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "2024-06-03 21:52:43.785816: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
-      "2024-06-03 21:52:43.795907: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
-      "2024-06-03 21:52:43.799970: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n"
+      " - PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')\n"
      ]
     }
    ],
    "source": [
+    "import os\n",
     "import tensorflow as tf\n",
     "\n",
+    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n",
+    "\n",
     "def get_available_gpus():\n",
     "    gpus = tf.config.list_physical_devices('GPU')\n",
     "    if gpus:\n",
     "        print(f\"GPUs detected: {len(gpus)}\")\n",
     "        for gpu in gpus:\n",
-    "            print(f\" - {gpu.name}\")\n",
+    "            print(f\" - {gpu}\")\n",
     "    else:\n",
     "        print(\"No GPUs detected.\")\n",
     "\n",
@@ -44,11 +38,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "id": "324dc7a1-61b8-4384-bfed-4385e7dbe49d",
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')\n"
+     ]
+    }
+   ],
+   "source": [
+    "gpus = tf.config.list_physical_devices('GPU')\n",
+    "for gpu in gpus:\n",
+    "    print(gpu)"
+   ]
   }
  ],
  "metadata": {