The Transformer Architecture and the Self-attention Mechanism

The transformer architecture and its self-attention mechanism are central to the deep-learning revolution that has reshaped computer vision, language understanding and scientific discovery. You'll learn how the architecture works, why it works, and when it pays to reach for it instead of a simpler model.

Why the Transformer and Self-attention Matter

Deep networks now define the state of the art in perception, language and code generation. Even if you don't train them from scratch, understanding how they work is essential for evaluating when and how to use them.

  • Start with a strong, simple baseline before adding layers.
  • Normalise inputs, initialise weights, and watch your loss curves.
  • Use regularisation (dropout, weight decay, augmentation) deliberately; a sketch follows this list.
  • Transfer learning beats training from scratch for most practical tasks.
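
To make these habits concrete, here is a minimal sketch on synthetic data. The layer sizes, dropout rate and weight-decay value are illustrative assumptions, not recommendations; the point is that normalisation, regularisation and loss monitoring are explicit, deliberate steps:

import torch
from torch import nn

torch.manual_seed(0)
X = torch.randn(512, 10) * 5 + 2
X = (X - X.mean(0)) / X.std(0)                       # normalise inputs
y = (X.sum(1, keepdim=True) > 0).float()

model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(),
                      nn.Dropout(0.2),               # dropout, used deliberately
                      nn.Linear(32, 1))
opt     = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)  # weight decay
loss_fn = nn.BCEWithLogitsLoss()

for epoch in range(20):
    loss = loss_fn(model(X), y)
    opt.zero_grad(); loss.backward(); opt.step()
    if epoch % 5 == 0:
        print(f"epoch {epoch:2d}  loss = {loss.item():.4f}")  # watch the loss curve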

How the Transformer and Self-attention Show Up in Practice

In a typical project, the transformer architecture and the self-attention mechanism are combined with the rest of the deep-learning toolkit. You rarely use any one technique in isolation; the real skill is knowing which combination fits the problem you are trying to solve, and being able to explain that choice to a non-technical stakeholder.
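
As one illustration of such a combination, the sketch below pairs a small convolutional stem (turning an image into patch tokens) with a single transformer encoder layer (mixing those tokens with self-attention). The 4×4 patching and layer sizes are assumptions chosen to keep the example tiny:

import torch
from torch import nn

torch.manual_seed(0)

class ConvTransformer(nn.Module):
    def __init__(self, n_classes=10, d_model=64):
        super().__init__()
        self.stem = nn.Conv2d(1, d_model, kernel_size=4, stride=4)   # 28x28 -> 7x7 patch grid
        self.pos  = nn.Parameter(torch.zeros(1, 49, d_model))        # learned positions
        self.enc  = nn.TransformerEncoderLayer(d_model, nhead=4,
                                               dim_feedforward=128, batch_first=True)
        self.head = nn.Linear(d_model, n_classes)

    def forward(self, x):                                # x: (B, 1, 28, 28)
        t = self.stem(x).flatten(2).transpose(1, 2)      # (B, 49, d_model) tokens
        t = self.enc(t + self.pos)                       # self-attention mixes patches
        return self.head(t.mean(dim=1))                  # pool tokens, classify

model = ConvTransformer()
print(model(torch.randn(2, 1, 28, 28)).shape)            # torch.Size([2, 10])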

Transformers are essential for image, audio, video and language systems, and increasingly competitive even for structured-data problems given enough samples.

Code Examples: The Transformer Architecture and Self-attention Mechanism (5 runnable snippets)

Copy any block into a file or notebook and run it end-to-end — each example stands alone.

Example 1: Autoencoder for anomaly detection

# Example 1: Autoencoder for anomaly detection
import torch
from torch import nn

torch.manual_seed(0)
normal   = torch.randn(1_000, 16)                            # training
anomaly  = torch.randn(50,    16) * 3 + 4                    # held-out outliers

class AE(nn.Module):
    def __init__(self, d=16, h=4):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(d, 8), nn.ReLU(), nn.Linear(8, h))
        self.dec = nn.Sequential(nn.Linear(h, 8), nn.ReLU(), nn.Linear(8, d))
    def forward(self, x): return self.dec(self.enc(x))

ae  = AE()
opt = torch.optim.Adam(ae.parameters(), lr=1e-3)
for epoch in range(40):
    loss = ((ae(normal) - normal) ** 2).mean()
    opt.zero_grad(); loss.backward(); opt.step()

err_normal  = ((ae(normal)  - normal)  ** 2).mean(dim=1).detach()
err_anomaly = ((ae(anomaly) - anomaly) ** 2).mean(dim=1).detach()
print(f"normal  median error : {err_normal.median():.3f}")
print(f"anomaly median error : {err_anomaly.median():.3f}")

Example 2: Self-attention from scratch in NumPy

# Example 2: Self-attention from scratch in NumPy
import numpy as np

rng = np.random.default_rng(0)
T, d_model, d_k = 6, 16, 8                        # sequence length, dims

x  = rng.standard_normal((T, d_model))
Wq = rng.standard_normal((d_model, d_k)) / np.sqrt(d_model)
Wk = rng.standard_normal((d_model, d_k)) / np.sqrt(d_model)
Wv = rng.standard_normal((d_model, d_k)) / np.sqrt(d_model)

Q, K, V = x @ Wq, x @ Wk, x @ Wv
scores  = Q @ K.T / np.sqrt(d_k)
weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
weights = weights / weights.sum(axis=-1, keepdims=True)
out     = weights @ V

print("attention matrix (rounded):\n", np.round(weights, 2))
print("\noutput shape :", out.shape)

Example 3: PyTorch MLP training loop

# Example 3: PyTorch MLP training loop
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

torch.manual_seed(0)
X = torch.randn(2_000, 20)
w = torch.randn(20, 1)
y = (X @ w + 0.3 * torch.randn(2_000, 1) > 0).float()

loader = DataLoader(TensorDataset(X, y), batch_size=64, shuffle=True)

model = nn.Sequential(
    nn.Linear(20, 64), nn.ReLU(),
    nn.Linear(64, 32), nn.ReLU(),
    nn.Linear(32, 1),
)
opt     = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.BCEWithLogitsLoss()

for epoch in range(5):
    total = 0.0
    for xb, yb in loader:
        opt.zero_grad()
        loss = loss_fn(model(xb), yb)
        loss.backward()
        opt.step()
        total += loss.item() * xb.size(0)
    print(f"epoch {epoch+1}: loss = {total/len(loader.dataset):.4f}")

Example 4: Keras CNN for MNIST

# Example 4: Keras CNN for MNIST
import tensorflow as tf
from tensorflow.keras import layers, models

(x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
x_tr = x_tr[..., None] / 255.0
x_te = x_te[..., None] / 255.0

model = models.Sequential([
    layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
    layers.MaxPool2D(),
    layers.Conv2D(64, 3, activation="relu"),
    layers.GlobalAveragePooling2D(),
    layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x_tr, y_tr, epochs=3, batch_size=128, validation_split=0.1)
print("test acc:", round(model.evaluate(x_te, y_te, verbose=0)[1], 4))

Example 5: Fine-tune a classifier head on frozen embeddings

# Example 5: Fine-tune a classifier head on frozen embeddings
import torch
from torch import nn

torch.manual_seed(0)
emb_dim   = 384
train_emb = torch.randn(800, emb_dim)
train_y   = torch.randint(0, 4, (800,))

head = nn.Sequential(nn.Dropout(0.1), nn.Linear(emb_dim, 4))
opt  = torch.optim.AdamW(head.parameters(), lr=3e-4, weight_decay=1e-2)
loss_fn = nn.CrossEntropyLoss()

for step in range(200):
    idx     = torch.randint(0, len(train_emb), (64,))
    logits  = head(train_emb[idx])
    loss    = loss_fn(logits, train_y[idx])
    opt.zero_grad(); loss.backward(); opt.step()
    if step % 40 == 0:
        acc = (logits.argmax(1) == train_y[idx]).float().mean()
        print(f"step {step:3d}  loss={loss.item():.3f}  acc={acc.item():.3f}")