2024-06-07 02:27:57 +08:00
parent 14a3489609
commit 90be4a6a24
4 changed files with 62 additions and 5 deletions


@@ -15,7 +15,7 @@ conda install pytorch::pytorch torchvision torchaudio -c pytorch -y
 pip install -r requirements.txt
 ```
-## MAC
+## WIN
 ```shell
 # Install PyTorch; v1.12 officially added the MPS backend for GPU acceleration on Mac M1 chips
 conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
@@ -23,7 +23,15 @@ conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvi
 pip install -r requirements.txt
 ```
-## gpt4free
+## Linux
+```shell
+# Install PyTorch; v1.12 officially added the MPS backend for GPU acceleration on Mac M1 chips
+conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
+pip install -r requirements.txt
+```
 pip install -U g4f[all]
 ## Proxy
 ```shell
 -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
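After installing on any of these platforms, a quick sanity check helps confirm the right backend is visible. A minimal sketch (the file name `check_env.py` is hypothetical, not part of this repo):

```python
# check_env.py -- hypothetical sanity check, not part of this repo
import torch

print("PyTorch:", torch.__version__)
# CUDA applies to the WIN/Linux installs above (NVIDIA GPUs)
print("CUDA available:", torch.cuda.is_available())
# MPS is the Apple-silicon backend added in PyTorch v1.12 (MAC install)
if hasattr(torch.backends, "mps"):
    print("MPS available:", torch.backends.mps.is_available())
```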


@@ -2,6 +2,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import torch
+# Linear regression training code
 def compute_error_for_line_given_points(b, w, points):
     totalError = 0
@@ -12,6 +13,7 @@ def compute_error_for_line_given_points(b, w, points):
         totalError += (y - (w * x + b)) ** 2
     return totalError / N
 def step_gradient(b_current, w_current, points, learningRate):
     b_gradient = torch.tensor(0.0, device=points.device)
     w_gradient = torch.tensor(0.0, device=points.device)
@@ -25,25 +27,29 @@ def step_gradient(b_current, w_current, points, learningRate):
     new_w = w_current - (learningRate * w_gradient)
     return [new_b, new_w]
 def gradient_descent_runner(points, starting_b, starting_w, learningRate, num_iterations):
     b = torch.tensor(starting_b, device=points.device)
     w = torch.tensor(starting_w, device=points.device)
     for i in range(num_iterations):
         b, w = step_gradient(b, w, points, learningRate)
         print("round:", i)
     return [b, w]
 def run():
     points_np = np.genfromtxt("data1.csv", delimiter=',').astype(np.float32)
-    points = torch.tensor(points_np, device='cuda')
+    points = torch.tensor(points_np, device='cuda:5')
     learning_rate = 0.0001
     initial_b = 0.0
     initial_w = 0.0
     num_iterations = 100000
     [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
     print("After gradient descent at b={0}, w={1}, error={2}".format(b.item(), w.item(),
           compute_error_for_line_given_points(b, w, points)))
+    return b.item(), w.item()
+# Run linear regression
+final_b, final_w = run()
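For reference, the per-point loop inside `step_gradient` can also be written with vectorized tensor ops. A minimal sketch, assuming `points` is an `[N, 2]` tensor of `(x, y)` pairs as loaded in `run()` (the function name `step_gradient_vectorized` is illustrative):

```python
import torch

def step_gradient_vectorized(b, w, points, lr):
    # Split the [N, 2] tensor into x and y columns
    x, y = points[:, 0], points[:, 1]
    err = (w * x + b) - y              # residuals for all N points at once
    b_grad = 2.0 * err.mean()          # d(MSE)/db
    w_grad = 2.0 * (err * x).mean()    # d(MSE)/dw
    return b - lr * b_grad, w - lr * w_grad
```

This computes the same gradients as the loop but with a constant number of kernel launches per step, which matters on GPU.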

Binary image file changed (not shown): 35 KiB → 34 KiB

mnist/README.md Normal file

@@ -0,0 +1,43 @@
# No deep learning, just function mapping
$$
X = [v_1, v_2, \dots, v_{784}] \\
X: [1, d_x]
$$
$$
H_1 = XW_1 + b_1 \\
W_1: [d_x, d_1] \\
b_1: [d_1]
$$
$$
H_2 = H_1W_2 + b_2 \\
W_2: [d_1, d_2] \\
b_2: [d_2]
$$
$$
H_3 = H_2W_3 + b_3 \\
W_3: [d_2, 10] \\
b_3: [10]
$$
## Loss
$$
H_3: [1, 10] \\
Y \in \{0, 1, 2, \dots, 9\} \\
\text{e.g. } 1 \Rightarrow [0,1,0,0,0,0,0,0,0,0] \\
\text{e.g. } 3 \Rightarrow [0,0,0,1,0,0,0,0,0,0] \\
\text{Euclidean distance: } H_3 \text{ vs. one-hot}(Y)
$$
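As a concrete sketch of this loss in PyTorch (the helper name `euclidean_loss` is illustrative, not from this repo):

```python
import torch
import torch.nn.functional as F

def euclidean_loss(h3, y):
    # h3: [1, 10] network output; y: integer label tensor in 0..9
    target = F.one_hot(y, num_classes=10).float()  # e.g. 3 -> [0,0,0,1,0,0,0,0,0,0]
    return ((h3 - target) ** 2).sum()              # squared Euclidean distance

loss = euclidean_loss(torch.randn(1, 10), torch.tensor([3]))
```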
## In a nutshell
$$
pred = \{[XW_1 + b_1]W_2 + b_2\}W_3 + b_3
$$
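Putting the three affine layers together, a minimal PyTorch sketch of this mapping (the hidden sizes `d1` and `d2` are arbitrary placeholders; only `dx = 784` and the 10 outputs are fixed by MNIST):

```python
import torch

dx, d1, d2 = 784, 256, 64    # dx = 28*28 pixels; d1, d2 are placeholder widths

W1, b1 = torch.randn(dx, d1), torch.zeros(d1)
W2, b2 = torch.randn(d1, d2), torch.zeros(d2)
W3, b3 = torch.randn(d2, 10), torch.zeros(10)

def forward(x):              # x: [1, dx] flattened image
    h1 = x @ W1 + b1         # [1, d1]
    h2 = h1 @ W2 + b2        # [1, d2]
    return h2 @ W3 + b3      # [1, 10] -- pred; no nonlinearity, pure function mapping

pred = forward(torch.randn(1, dx))
```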