<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://wiki.anunna.wur.nl/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Brumm025</id>
	<title>HPCwiki - User contributions [en]</title>
	<link rel="self" type="application/atom+xml" href="https://wiki.anunna.wur.nl/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Brumm025"/>
	<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php/Special:Contributions/Brumm025"/>
	<updated>2026-04-19T20:50:28Z</updated>
	<subtitle>User contributions</subtitle>
	<generator>MediaWiki 1.43.1</generator>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2260</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2260"/>
		<updated>2023-10-25T11:34:59Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Create conda environment that we can use for a jupyter kernel */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;This page describes how to create a JupyterHub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= Setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Link lustre path to home directory ==&lt;br /&gt;
&lt;br /&gt;
When working from JupyterHub, the default working directory is your home folder. However, it is recommended to keep your data and code on the Lustre file system. To make this easier, we can create a symbolic link to Lustre from our home directory:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
ln -s /lustre/[path to your lustre folder] [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
To remove a link: &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
rm [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3.10 ipykernel &lt;br /&gt;
conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specify the Python version for your conda environment with python=3.10. Please make sure the Python version is compatible with your required packages.&lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
Installation instructions for PyTorch can be found [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example, the following installs PyTorch with CUDA 11.8 support:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select the following; a short sketch for checking the resulting allocation from inside the notebook follows this list:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (the default when the gpu partition is selected; use --gres=gpu:x to request x GPUs)&lt;br /&gt;
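&lt;br /&gt;
Once the notebook has started, you can do a quick sanity check on the allocation from a notebook cell. This is only a minimal sketch, assuming the notebook server runs inside a Slurm job; which SLURM_* variables are actually set depends on the cluster configuration:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import os&lt;br /&gt;
&lt;br /&gt;
# Print the Slurm resources assigned to this notebook job, if the variables are set.&lt;br /&gt;
for var in (&#039;SLURM_JOB_ID&#039;, &#039;SLURM_JOB_PARTITION&#039;, &#039;SLURM_CPUS_ON_NODE&#039;,&lt;br /&gt;
            &#039;SLURM_MEM_PER_NODE&#039;, &#039;CUDA_VISIBLE_DEVICES&#039;):&lt;br /&gt;
    print(var, &#039;=&#039;, os.environ.get(var, &#039;unset&#039;))&amp;lt;/nowiki&amp;gt;&lt;br /&gt;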
&lt;br /&gt;
= Using multiple GPUs =&lt;br /&gt;
&lt;br /&gt;
* Select multiple GPUs when starting JupyterHub by setting --gres=gpu:x in the extra options field, where x is the number of requested GPUs&lt;br /&gt;
* Multiple GPUs should then be available to the JupyterHub notebook. Verify this with the GPU tests in the following section, or with the short sketch below.&lt;br /&gt;
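&lt;br /&gt;
As a first quick check, the following minimal PyTorch sketch places a small tensor on every GPU visible to the notebook and runs an operation on it (it assumes PyTorch was installed as described above):&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
# Run a tiny computation on each GPU visible to this notebook.&lt;br /&gt;
for i in range(torch.cuda.device_count()):&lt;br /&gt;
    device = torch.device(&#039;cuda:{}&#039;.format(i))&lt;br /&gt;
    x = torch.ones(2, 3, device=device)&lt;br /&gt;
    print(device, torch.cuda.get_device_name(i), (x * 2).sum().item())&amp;lt;/nowiki&amp;gt;&lt;br /&gt;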
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== PyTorch ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def get_version():&lt;br /&gt;
    # Report the installed PyTorch build and the CUDA version it was compiled against.&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.__version__&#039;)&lt;br /&gt;
    print(torch.__version__, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.version.cuda&#039;)&lt;br /&gt;
    print(torch.version.cuda, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== TensorFlow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;TensorFlow can access&amp;quot;, len(gpuList), &amp;quot;GPU(s)&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;/GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2259</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2259"/>
		<updated>2023-10-25T11:34:43Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Create conda environment that we can use for a jupyter kernel */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Link lustre path to home directory ==&lt;br /&gt;
&lt;br /&gt;
When working from Jupyterhub the default working directory is the home folder. However, it is recommended to put your data and code on the lustre pathings. To make this easier, we can create a link to lustre from our home directory:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
ln -s /lustre/[path to your lustre folder] [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
To remove a link: &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
rm [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3.10 ipykernel &lt;br /&gt;
conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages.&lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Using multiple GPUs =&lt;br /&gt;
&lt;br /&gt;
* Select multiple GPUs in when starting jupyterhub in the extra options menu: --gres=gpu:x where x is amount of requested GPUs&lt;br /&gt;
* There should be multiple GPUs available to the jupyterhub notebook. Check this by using GPU tests in the following section.&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2258</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2258"/>
		<updated>2023-10-25T11:31:57Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Link lustre path to home directory ==&lt;br /&gt;
&lt;br /&gt;
When working from Jupyterhub the default working directory is the home folder. However, it is recommended to put your data and code on the lustre pathings. To make this easier, we can create a link to lustre from our home directory:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
ln -s /lustre/[path to your lustre folder] [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
To remove a link: &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
rm [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages. &lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Using multiple GPUs =&lt;br /&gt;
&lt;br /&gt;
* Select multiple GPUs in when starting jupyterhub in the extra options menu: --gres=gpu:x where x is amount of requested GPUs&lt;br /&gt;
* There should be multiple GPUs available to the jupyterhub notebook. Check this by using GPU tests in the following section.&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2256</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2256"/>
		<updated>2023-10-18T08:47:17Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Link lustre path to home directory ==&lt;br /&gt;
&lt;br /&gt;
When working from Jupyterhub the default working directory is the home folder. However, it is recommended to put your data and code on the lustre pathings. To make this easier, we can create a link to lustre from our home directory:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
ln -s /lustre/[path to your lustre folder] [reference name, for example lustre_folders]&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages. &lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Using multiple GPUs =&lt;br /&gt;
&lt;br /&gt;
* Select multiple GPUs in when starting jupyterhub in the extra options menu: --gres=gpu:x where x is amount of requested GPUs&lt;br /&gt;
* There should be multiple GPUs available to the jupyterhub notebook. Check this by using GPU tests in the following section.&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2255</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2255"/>
		<updated>2023-10-06T11:36:38Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Start jupyter notebook with GPU */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages. &lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2254</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2254"/>
		<updated>2023-10-06T11:36:27Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Start jupyter notebook with GPU */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages. &lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* &amp;lt;strong&amp;gt;Select a location for your server:&amp;lt;/strong&amp;gt; on the cluster (default option)&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Partition to use:&amp;lt;/strong&amp;gt; gpu&lt;br /&gt;
* &amp;lt;strong&amp;gt;Memory (in MB):&amp;lt;/strong&amp;gt; desired memory&lt;br /&gt;
* &amp;lt;strong&amp;gt;Number of CPUs:&amp;lt;/strong&amp;gt; desired CPU count&lt;br /&gt;
* &amp;lt;strong&amp;gt;Maximum execution time (hours:minutes:seconds):&amp;lt;/strong&amp;gt; maximum amount of time the notebook is available&lt;br /&gt;
* &amp;lt;strong&amp;gt;Extra options:&amp;lt;/strong&amp;gt; --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2253</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2253"/>
		<updated>2023-10-06T11:34:29Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Tensorflow */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specific the python version for you conda environment with python=3 Please take care what python version is compatible with you required packages. &lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* Select a location for your server: on the cluster (default option)&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Memory (in MB): desired memory&lt;br /&gt;
* Number of CPUs: desired CPU count&lt;br /&gt;
* Maximum execution time (hours:minutes:seconds): maximum amount of time the notebook is available&lt;br /&gt;
* Extra options: --gres=gpu:1 (default when selecting GPU, gpu:x for x amount of GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2252</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2252"/>
		<updated>2023-10-06T11:34:09Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Pytorch */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specify the Python version for your conda environment with python=3. Please take care that the chosen Python version is compatible with your required packages.&lt;br /&gt;
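&lt;br /&gt;
As an optional check, you can verify that the kernel was registered for your user by listing the installed kernel specs; kernel_test should appear in the output:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
jupyter kernelspec list&amp;lt;/nowiki&amp;gt;&lt;br /&gt;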
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
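&lt;br /&gt;
To quickly verify the installation before starting a notebook, you can print the installed versions from the activated environment. Note that torch.cuda.is_available() will typically report False on the login node, which usually has no GPU; the real test happens later inside the notebook on the gpu partition:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
python -c &amp;quot;import torch; print(torch.__version__, torch.version.cuda, torch.cuda.is_available())&amp;quot;&amp;lt;/nowiki&amp;gt;&lt;br /&gt;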
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* Select a location for your server: on the cluster (default option)&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Memory (in MB): desired memory&lt;br /&gt;
* Number of CPUs: desired CPU count&lt;br /&gt;
* Maximum execution time (hours:minutes:seconds): maximum amount of time the notebook is available&lt;br /&gt;
* Extra options: --gres=gpu:1 (the default when the gpu partition is selected; use --gres=gpu:x to request x GPUs)&lt;br /&gt;
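&lt;br /&gt;
Once the notebook is running on a GPU node, you can confirm which GPU(s) were allocated to your job directly from a notebook cell. This assumes the nvidia-smi tool is available on the GPU node, which is normally the case:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
# Run in a notebook cell: lists the GPU(s) visible to your job&lt;br /&gt;
!nvidia-smi&amp;lt;/nowiki&amp;gt;&lt;br /&gt;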
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2251</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2251"/>
		<updated>2023-10-06T11:30:14Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specify the Python version for your conda environment with python=3. Please take care that the chosen Python version is compatible with your required packages.&lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* Select a location for your server: on the cluster (default option)&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Memory (in MB): desired memory&lt;br /&gt;
* Number of CPUs: desired CPU count&lt;br /&gt;
* Maximum execution time (hours:minutes:seconds): maximum amount of time the notebook is available&lt;br /&gt;
* Extra options: --gres=gpu:1 (the default when the gpu partition is selected; use --gres=gpu:x to request x GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
#print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; time.sleep(20)&#039;)&lt;br /&gt;
#time.sleep(20)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2250</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2250"/>
		<updated>2023-10-06T11:29:23Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: /* Install required packages */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specify the Python version for your conda environment with python=3. Please take care that the chosen Python version is compatible with your required packages.&lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&lt;br /&gt;
&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* Select a location for your server: on the cluster (default option)&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Memory (in MB): desired memory&lt;br /&gt;
* Number of CPUs: desired CPU count&lt;br /&gt;
* Maximum execution time (hours:minutes:seconds): maximum amount of time the notebook is available&lt;br /&gt;
* Extra options: --gres=gpu:1 (the default when the gpu partition is selected; use --gres=gpu:x to request x GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
#print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; time.sleep(20)&#039;)&lt;br /&gt;
#time.sleep(20)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&lt;br /&gt;
&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
	<entry>
		<id>https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2249</id>
		<title>JupyterHub with GPU</title>
		<link rel="alternate" type="text/html" href="https://wiki.anunna.wur.nl/index.php?title=JupyterHub_with_GPU&amp;diff=2249"/>
		<updated>2023-10-06T11:28:54Z</updated>

		<summary type="html">&lt;p&gt;Brumm025: How to create a jupyterhub instance with GPU enabled&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Create a jupyterhub instance with GPU support enabled.&lt;br /&gt;
&lt;br /&gt;
= setup =&lt;br /&gt;
&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Log_in_to_Anunna Connect to login node of Anunna]&lt;br /&gt;
* [https://wiki.anunna.wur.nl/index.php/Running_Snakemake_pipelines#Installation Install miniconda]&lt;br /&gt;
&lt;br /&gt;
== Create conda environment that we can use for a jupyter kernel == &lt;br /&gt;
&lt;br /&gt;
 &amp;lt;nowiki&amp;gt;&lt;br /&gt;
conda create -y -n kernel_test python=3 ipykernel &amp;amp;&amp;amp; conda activate kernel_test&lt;br /&gt;
python -m ipykernel install --user --name kernel_test&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
NOTE: You can specify the Python version for your conda environment with python=3. Please take care that the chosen Python version is compatible with your required packages.&lt;br /&gt;
&lt;br /&gt;
== Install required packages ==&lt;br /&gt;
&lt;br /&gt;
For pytorch you can find information [https://pytorch.org/get-started/locally/ here] and for TensorFlow [https://www.tensorflow.org/install/pip here].&lt;br /&gt;
 &lt;br /&gt;
As an example I use the following pytorch installation:&lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
= Start jupyter notebook with GPU = &lt;br /&gt;
&lt;br /&gt;
Go [https://notebook.anunna.wur.nl here] and select:&lt;br /&gt;
&lt;br /&gt;
* Select a location for your server: on the cluster (default option)&lt;br /&gt;
* Partition to use: gpu&lt;br /&gt;
* Memory (in MB): desired memory&lt;br /&gt;
* Number of CPUs: desired CPU count&lt;br /&gt;
* Maximum execution time (hours:minutes:seconds): maximum amount of time the notebook is available&lt;br /&gt;
* Extra options: --gres=gpu:1 (the default when the gpu partition is selected; use --gres=gpu:x to request x GPUs)&lt;br /&gt;
&lt;br /&gt;
= Test GPU availability =&lt;br /&gt;
&lt;br /&gt;
== Pytorch == &lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
import torch&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_all_cuda_devices():&lt;br /&gt;
    device_count = torch.cuda.device_count()&lt;br /&gt;
    for i in range(device_count):&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.device(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
        print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name({})&#039;.format(i))&lt;br /&gt;
        result = torch.cuda.get_device_name(i)&lt;br /&gt;
        print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.is_available()&#039;)&lt;br /&gt;
    result = torch.cuda.is_available()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device_count()&#039;)&lt;br /&gt;
    result = torch.cuda.device_count()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.current_device()&#039;)&lt;br /&gt;
    result = torch.cuda.current_device()&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.device(0)&#039;)&lt;br /&gt;
    result = torch.cuda.device(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.cuda.get_device_name(0)&#039;)&lt;br /&gt;
    result = torch.cuda.get_device_name(0)&lt;br /&gt;
    print(result, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    check_all_cuda_devices()&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
def check_cuda_ops():&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3)&#039;)&lt;br /&gt;
    zeros = torch.zeros(2, 3)&lt;br /&gt;
    print(zeros, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.zeros(2, 3).cuda()&#039;)&lt;br /&gt;
    cuda_zero = torch.zeros(2, 3).cuda()&lt;br /&gt;
    print(cuda_zero, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.tensor([[1, 2, 3], [4, 5, 6]])&#039;)&lt;br /&gt;
    tensor_a = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()&lt;br /&gt;
    print(tensor_a, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a + cuda_zero&#039;)&lt;br /&gt;
    sum = tensor_a + cuda_zero&lt;br /&gt;
    print(sum, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; tensor_a * cuda_twos&#039;)&lt;br /&gt;
    tensor_a = tensor_a.to(torch.float)&lt;br /&gt;
    cuda_zero = cuda_zero.to(torch.float)&lt;br /&gt;
    cuda_twos = (cuda_zero + 1.0) * 2.0&lt;br /&gt;
    product = tensor_a * cuda_twos&lt;br /&gt;
    print(product, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
    print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; torch.matmul(tensor_a, cuda_twos.T)&#039;)&lt;br /&gt;
    mat_mul = torch.matmul(tensor_a, cuda_twos.T)&lt;br /&gt;
    print(mat_mul, &#039;\n&#039;)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    get_version()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;get_version() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&lt;br /&gt;
#print(&#039;&amp;gt;&amp;gt;&amp;gt;&amp;gt; time.sleep(20)&#039;)&lt;br /&gt;
#time.sleep(20)&lt;br /&gt;
&lt;br /&gt;
try:&lt;br /&gt;
    check_cuda_ops()&lt;br /&gt;
except Exception as e:&lt;br /&gt;
    print(&#039;check_cuda_ops() failed, exception message below:&#039;)&lt;br /&gt;
    print(e)&lt;br /&gt;
&amp;lt;/nowiki&amp;gt;&lt;br /&gt;
&lt;br /&gt;
== Tensorflow ==&lt;br /&gt;
&lt;br /&gt;
&amp;lt;nowiki&amp;gt;&lt;br /&gt;
import tensorflow as tf&lt;br /&gt;
hasGPUSupport = tf.test.is_built_with_cuda()&lt;br /&gt;
gpuList = tf.config.list_physical_devices(&#039;GPU&#039;)&lt;br /&gt;
print(&amp;quot;Tensorflow Compiled with CUDA/GPU Support:&amp;quot;, hasGPUSupport)&lt;br /&gt;
print(&amp;quot;Tensorflow can access&amp;quot;, len(gpuList), &amp;quot;GPU&amp;quot;)&lt;br /&gt;
print(&amp;quot;Accessible GPUs are:&amp;quot;)&lt;br /&gt;
print(gpuList)&lt;br /&gt;
&lt;br /&gt;
tf.debugging.set_log_device_placement(True)&lt;br /&gt;
# Place tensors on the GPU&lt;br /&gt;
with tf.device(&#039;device:GPU:0&#039;):&lt;br /&gt;
  a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])&lt;br /&gt;
  b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])&lt;br /&gt;
&lt;br /&gt;
# Run on the GPU&lt;br /&gt;
c = tf.matmul(a, b)&lt;br /&gt;
print(c)&lt;br /&gt;
&amp;lt;/nowiki&amp;gt;&lt;/div&gt;</summary>
		<author><name>Brumm025</name></author>
	</entry>
</feed>