From 3a004e712a8783ef1e30745b401aed27b1c88417 Mon Sep 17 00:00:00 2001
From: Oleg Sheynin
Date: Tue, 4 Jun 2024 00:57:49 +0000
Subject: [PATCH] progress: common data directory

---
 .gitignore                             |  5 +++-
 pytorch/Dockerfile                     |  8 +++---
 pytorch/docker-compose.yml             |  3 ++-
 tensorflow/Dockerfile                  |  5 ++--
 tensorflow/docker-compose.yml          |  3 ++-
 tensorflow/notebooks/Testing GPU.ipynb | 36 +++++++++++++++-----------
 6 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1507cd6..712135b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,7 @@ fastbook
 *.jpeg
 #**/pytorch/jupyter-settings
 **/workspaces
-**/user-settings
\ No newline at end of file
+**/user-settings
+.Trash*
+*pkl
+data
diff --git a/pytorch/Dockerfile b/pytorch/Dockerfile
index e72da10..4615ff0 100644
--- a/pytorch/Dockerfile
+++ b/pytorch/Dockerfile
@@ -3,10 +3,12 @@
 # -------------------------------------------------------
 FROM pytorch/pytorch:latest
 
-# Install JupyterLab
-RUN pip install jupyterlab
+RUN useradd -m -s /bin/bash oleg
 
-# Install additional packages
+# Install packages
+RUN pip install jupyterlab
+RUN pip install pandas
+RUN pip install numpy
 RUN pip install plotly
 RUN pip install fastbook
 
diff --git a/pytorch/docker-compose.yml b/pytorch/docker-compose.yml
index 8c181e0..9405b1a 100644
--- a/pytorch/docker-compose.yml
+++ b/pytorch/docker-compose.yml
@@ -10,7 +10,7 @@ services:
   jupyter:
     user: "oleg"
     build: ${ROOT_DIR}
-    image: cvtt/jupyter_gpu_pt:v1.0.8
+    image: cvtt/jupyter_gpu_pt:v1.0.9
     container_name: jupyter_gpu_pt
     runtime: nvidia
     environment:
@@ -20,6 +20,7 @@
       - ${ROOT_DIR}/jupyter_settings/user-settings:/root/.jupyter/lab/user-settings
       - ${ROOT_DIR}/jupyter_settings/workspaces:/root/.jupyter/lab/workspaces
       - ${ROOT_DIR}/.cache/torch:/root/.cache/torch
+      - /opt/jupyter_gpu/data:/workspace/data
     ports:
       - "${JUPYTER_PORT}:${JUPYTER_PORT}"
     shm_size: "8gb"
diff --git a/tensorflow/Dockerfile b/tensorflow/Dockerfile
index 97776b9..1781b65 100644
--- a/tensorflow/Dockerfile
+++ b/tensorflow/Dockerfile
@@ -13,8 +13,9 @@ RUN useradd -m -s /bin/bash oleg
 RUN mkdir -p /workspace/tf_cache
 
 # Install packages
-RUN pip install tensorflow-gpu
 RUN pip install jupyterlab
+RUN pip install pandas
+RUN pip install numpy
 RUN pip install plotly
 
 # Install additional applications
@@ -28,4 +29,4 @@ WORKDIR /workspace
 EXPOSE ${JUPYTER_PORT}
 
 # Command to run JupyterLab
-CMD ["jupyter-lab", "--ip=0.0.0.0", "--port=9999", "--no-browser", "--allow-root", "--NotebookApp.token=''"]
+CMD ["sh", "-c", "jupyter lab --ip=0.0.0.0 --port=9999 --allow-root --no-browser --NotebookApp.token=''"]
\ No newline at end of file
diff --git a/tensorflow/docker-compose.yml b/tensorflow/docker-compose.yml
index bc67f84..c96801b 100644
--- a/tensorflow/docker-compose.yml
+++ b/tensorflow/docker-compose.yml
@@ -10,7 +10,7 @@ services:
   jupyter:
     user: "oleg"
     build: ${ROOT_DIR}
-    image: cvtt/jupyter_gpu_tf:v1.0.2
+    image: cvtt/jupyter_gpu_tf:v1.0.4
    container_name: jupyter_gpu_tf
     runtime: nvidia
     environment:
@@ -20,6 +20,7 @@
       - ${ROOT_DIR}/notebooks:/workspace
       - ${ROOT_DIR}/jupyter_settings/user-settings:/home/oleg/.jupyter/lab/user-settings
      - ${ROOT_DIR}/jupyter_settings/workspaces:/home/oleg/.jupyter/lab/workspaces
+      - /opt/jupyter_gpu/data:/workspace/data
     ports:
       - "${JUPYTER_PORT}:${JUPYTER_PORT}"
     shm_size: "8gb"
diff --git a/tensorflow/notebooks/Testing GPU.ipynb b/tensorflow/notebooks/Testing GPU.ipynb
index 66f48e9..324021e 100644
--- a/tensorflow/notebooks/Testing GPU.ipynb
+++ b/tensorflow/notebooks/Testing GPU.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "id": "000d047d-3dfd-48cb-a3c3-bd1ad7c78b71",
    "metadata": {},
    "outputs": [
@@ -12,28 +12,22 @@
     "text": [
      "TensorFlow version: 2.16.1\n",
      "GPUs detected: 1\n",
-     " - /physical_device:GPU:0\n"
-    ]
-   },
-   {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
-     "2024-06-03 21:52:43.785816: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
-     "2024-06-03 21:52:43.795907: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
-     "2024-06-03 21:52:43.799970: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n"
+     " - PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')\n"
     ]
    }
   ],
   "source": [
+    "import os\n",
    "import tensorflow as tf\n",
    "\n",
+    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n",
+    "\n",
    "def get_available_gpus():\n",
    "    gpus = tf.config.list_physical_devices('GPU')\n",
    "    if gpus:\n",
    "        print(f\"GPUs detected: {len(gpus)}\")\n",
    "        for gpu in gpus:\n",
-    "            print(f\" - {gpu.name}\")\n",
+    "            print(f\" - {gpu}\")\n",
    "    else:\n",
    "        print(\"No GPUs detected.\")\n",
    "\n",
@@ -44,11 +38,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "id": "324dc7a1-61b8-4384-bfed-4385e7dbe49d",
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')\n"
+     ]
+    }
+   ],
+   "source": [
+    "gpus = tf.config.list_physical_devices('GPU')\n",
+    "for gpu in gpus:\n",
+    "    print(gpu)"
+   ]
   }
  ],
 "metadata": {