250315
README.md (15 changed lines)
@@ -20,7 +20,7 @@ pip install -r requirements.txt
 
 ## linux
 ```shell
-conda create -n ail-tf python=3.9 -y
+conda create -n ail-tf python=3.10 -y
 conda create -n ail-pt python=3.10 -y
 
 conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
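The conda command above pins pytorch-cuda=12.1. As a minimal sketch (not part of this commit), that pin can be cross-checked from Python using standard torch attributes:

```python
import torch

# Not part of this commit: confirm the installed PyTorch package was built
# for the CUDA version pinned above (pytorch-cuda=12.1).
print("torch version :", torch.__version__)
print("built for CUDA:", torch.version.cuda)              # "12.1" for a matching build
print("cuDNN version :", torch.backends.cudnn.version())  # None on a CPU-only build
```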
@@ -30,6 +30,19 @@ pip install tensorflow
 pip install -r requirements.txt
 ```
 
+## windows
+```shell
+conda create -n ail-tf python=3.9 -y
+conda create -n ail-pt python=3.9 -y
+conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
+
+# tf needs CUDA and cuDNN installed
+conda install cudatoolkit=11.8 cudnn=8.9 -c nvidia -y
+
+pip install tensorflow
+pip install -r requirements.txt
+```
+
 ## Install graphviz - for drawing model diagrams
 ```shell
 brew install graphviz
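The windows section above pins cudatoolkit=11.8 and cudnn=8.9 for TensorFlow. A hedged sketch (not part of this commit) for cross-checking those pins against the CUDA/cuDNN versions the installed TensorFlow wheel was built with:

```python
import tensorflow as tf

# Not part of this commit: report the CUDA/cuDNN versions this TensorFlow
# wheel expects, to compare with the conda cudatoolkit/cudnn pins above.
info = tf.sysconfig.get_build_info()
print("is CUDA build   :", info.get("is_cuda_build"))
print("built with CUDA :", info.get("cuda_version"))
print("built with cuDNN:", info.get("cudnn_version"))
```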
lab/9_CNN-MNIST.ipynb (325 lines, new file)
File diff suppressed because one or more lines are too long
test/tf-cuda.py (28 lines, new file)
@@ -0,0 +1,28 @@
+import tensorflow as tf
+
+# Print TensorFlow version
+print("TensorFlow version:", tf.__version__)
+
+# Check whether CUDA support is built in
+cuda_available = tf.test.is_built_with_cuda()
+print(f"CUDA support: {cuda_available}")
+
+# Check whether a GPU is available
+gpu_available = tf.config.list_physical_devices('GPU')
+print(f"GPU available: {gpu_available}")
+
+# Print available devices
+print("\nAvailable devices:")
+for device in tf.config.list_physical_devices():
+    print(device)
+
+# If a GPU is available, print its details
+gpus = tf.config.list_physical_devices('GPU')
+if gpus:
+    print("\nGPU details:")
+    for gpu in gpus:
+        print(gpu)
+        print("Device name:", gpu.name)
+        print("Device type:", gpu.device_type)
+else:
+    print("\nNo GPU device detected")
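test/tf-cuda.py above only detects devices. A small optional follow-up, sketched here and not part of the commit, is to run one operation with device-placement logging enabled so TensorFlow reports where it actually executed:

```python
import tensorflow as tf

# Not part of this commit: a quick GPU smoke test. With device placement
# logging enabled, TensorFlow prints which device ran the matmul.
tf.debugging.set_log_device_placement(True)

a = tf.random.normal((1024, 1024))
b = tf.random.normal((1024, 1024))
c = tf.matmul(a, b)  # placed on GPU:0 automatically when one is visible
print("result shape:", c.shape)
```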
test/torch-cuda.py (20 lines, new file)
@@ -0,0 +1,20 @@
+import torch
+
+# Print PyTorch version
+print("PyTorch version:", torch.__version__)
+
+# Check whether CUDA is available
+cuda_available = torch.cuda.is_available()
+print(f"CUDA available: {cuda_available}")
+
+# If CUDA is available, print GPU information
+if cuda_available:
+    print("\nGPU details:")
+    print("Number of GPUs:", torch.cuda.device_count())
+    print("Current GPU:", torch.cuda.current_device())
+    print("GPU name:", torch.cuda.get_device_name(0))
+    print("GPU memory:")
+    print("  allocated:", torch.cuda.memory_allocated())
+    print("  reserved:", torch.cuda.memory_reserved())
+else:
+    print("\nNo GPU device detected")
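Similarly, test/torch-cuda.py above stops at reporting device properties. A hedged follow-up sketch (not part of the commit) that actually executes a kernel on the GPU:

```python
import torch

# Not part of this commit: a quick CUDA smoke test that runs a matmul on the
# GPU and synchronizes so any launch error surfaces immediately.
if torch.cuda.is_available():
    device = torch.device("cuda")
    a = torch.randn(1024, 1024, device=device)
    b = torch.randn(1024, 1024, device=device)
    c = a @ b
    torch.cuda.synchronize()
    print("matmul ran on:", c.device)
else:
    print("CUDA not available; skipping GPU smoke test")
```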