import tensorflow as tf
import numpy as np
from tensorflow import keras
from tqdm import tqdm

# Sample data
X = np.random.rand(300, 20)
y = np.random.randint(0, 2, size=(300,))

# AdamW optimizer configuration (decoupled weight decay)
learning_rate = 0.001
weight_decay = 0.004

# Build a simple neural network
model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(20,)),
    keras.layers.Dense(1, activation='sigmoid')
])
# Create an AdamW optimizer with decoupled weight decay
# (tf.keras.optimizers.AdamW is built in from TF 2.11 on; earlier releases
# can fall back to tensorflow_addons.optimizers.AdamW)
adamw_optimizer = tf.keras.optimizers.AdamW(
    learning_rate=learning_rate,
    weight_decay=weight_decay,
)
# Apply weight decay only to kernel (weight) variables, not to biases
adamw_optimizer.exclude_from_weight_decay(var_names=["bias"])

# Compile the model using the AdamW optimizer with weight decay
model.compile(optimizer=adamw_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
# Define the number of epochs
total_epochs = 1000

# Train the model with a custom progress bar
for epoch in tqdm(range(total_epochs), unit="epoch"):
    # Every 100th epoch, print a marker and let Keras show its own output;
    # otherwise train silently so only the tqdm bar advances
    if (epoch + 1) % 100 == 0:
        verbose = 1
        tqdm.write(f"Epoch {epoch + 1}/{total_epochs}")
    else:
        verbose = 0
    model.fit(X, y, epochs=1, batch_size=32, verbose=verbose)
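# ---------------------------------------------------------------------------
# Alternative: the original paste tried to apply weight decay by hand on the
# kernel variables. If tf.keras.optimizers.AdamW is not available in your
# TensorFlow version, a rough sketch of that decoupled-decay idea is shown
# below. It uses a plain Adam optimizer plus an explicit post-step shrink of
# the kernel weights; train_step, plain_adam and dataset are illustrative
# names, and this is an approximation of AdamW, not the exact textbook update.
# ---------------------------------------------------------------------------
plain_adam = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss_fn = tf.keras.losses.BinaryCrossentropy()
dataset = tf.data.Dataset.from_tensor_slices(
    (X.astype("float32"), y.astype("float32").reshape(-1, 1))
).batch(32)

@tf.function
def train_step(x_batch, y_batch):
    with tf.GradientTape() as tape:
        preds = model(x_batch, training=True)
        loss = loss_fn(y_batch, preds)
    grads = tape.gradient(loss, model.trainable_variables)
    plain_adam.apply_gradients(zip(grads, model.trainable_variables))
    # Decoupled weight decay: shrink only kernel (weight) variables, after the
    # Adam step, instead of folding the penalty into the gradients
    for var in model.trainable_variables:
        if "kernel" in var.name:
            var.assign_sub(learning_rate * weight_decay * var)
    return loss

# One pass over the data with the manual training step
for x_batch, y_batch in dataset:
    batch_loss = train_step(x_batch, y_batch)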