
# Train Demo

A demo that uses NumPy to generate 50 points from the quadratic function y = x^2 + bias, then recovers the bias (and hence the quadratic) by gradient descent, once in TensorFlow and once in PyTorch.
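Before reaching for either framework, note that this particular problem has a closed-form answer: minimizing mean((y - x**2 - bias)**2) over the single parameter gives bias* = mean(y - x**2). A minimal NumPy sketch (reusing the same seed and data as the listings below) computes the value both training loops should converge to:

```python
import numpy as np

# Recreate the dataset used in both demos (same seed, same 50 points).
np.random.seed(0)
x = np.random.uniform(-10, 10, 50).astype(np.float32)
y = x**2 + 5.0

# The MSE-optimal bias is the mean residual of y over x**2.
print(np.mean(y - x**2))  # ≈ 5.0
```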

## TensorFlow

```python
import numpy as np
import tensorflow as tf

# Generate 50 points on y = x^2 + true_bias
np.random.seed(0)
x = np.random.uniform(-10, 10, 50).astype(np.float32)
true_bias = 5.0
y = x**2 + true_bias

# Trainable parameter, initialized at 0
bias = tf.Variable(0.0)

# SGD optimizer with a fixed learning rate
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

# Training loop: minimize the MSE between y and x^2 + bias
for step in range(2000):
    with tf.GradientTape() as tape:
        y_pred = x**2 + bias
        loss = tf.reduce_mean((y - y_pred)**2)
    grad = tape.gradient(loss, [bias])
    optimizer.apply_gradients(zip(grad, [bias]))
    
    if step % 200 == 0:
        print(f"Step {step}, Loss: {loss.numpy():.4f}, Bias: {bias.numpy():.4f}")

print(f"训练完成,拟合的 bias ≈ {bias.numpy():.4f}, 真实 bias = {true_bias}")

## PyTorch

```python
import numpy as np
import torch

# Generate the same data as in the TensorFlow demo
np.random.seed(0)
x = np.random.uniform(-10, 10, 50).astype(np.float32)
true_bias = 5.0
y = x**2 + true_bias

# Convert to torch tensors
x_tensor = torch.tensor(x)
y_tensor = torch.tensor(y)

# Trainable parameter with autograd enabled
bias = torch.tensor(0.0, requires_grad=True)

# SGD optimizer over the single parameter
optimizer = torch.optim.SGD([bias], lr=0.01)

# Training loop: zero grads, forward, backward, step
for step in range(2000):
    optimizer.zero_grad()
    y_pred = x_tensor**2 + bias
    loss = torch.mean((y_tensor - y_pred)**2)
    loss.backward()
    optimizer.step()
    
    if step % 200 == 0:
        print(f"Step {step}, Loss: {loss.item():.4f}, Bias: {bias.item():.4f}")

print(f"训练完成,拟合的 bias ≈ {bias.item():.4f}, 真实 bias = {true_bias}")
