# Copyright 2026 Hackable Diffusion Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Benchmark suite for Hackable Diffusion optimizations."""

import time

import jax

from hackable_diffusion.lib.architecture import attention
from hackable_diffusion.lib.architecture import dit_blocks
from hackable_diffusion.lib.architecture import normalization

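# Each benchmark below JIT-compiles a component, runs a few untimed warmup
# calls (so compilation cost stays out of the measurement), then reports the
# mean wall-clock time per call. Calling `block_until_ready()` on the result
# forces JAX's asynchronous dispatch to finish, so the timings include device
# execution. The helper below assumes each callable returns a single array;
# `jax.block_until_ready(fn(*args))` would also work for pytree outputs.
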
def benchmark_component(name, fn, *args, iters=100, warmup=10):
    """Prints and returns the mean latency of `fn(*args)` in milliseconds."""
    # Warmup: the first calls trigger JIT compilation; keep them out of the timing.
    for _ in range(warmup):
        fn(*args).block_until_ready()

    # Measure with a monotonic clock, blocking on each result so device
    # execution time is included.
    start = time.perf_counter()
    for _ in range(iters):
        fn(*args).block_until_ready()
    end = time.perf_counter()

    avg_ms = (end - start) / iters * 1000
    print(f"{name:.<30} {avg_ms:.4f} ms")
    return avg_ms

def run_all():
    """Runs all component benchmarks and prints their average latencies."""
    print("Starting Hackable Diffusion Optimizations Benchmark...")
    print("-" * 50)

    key = jax.random.PRNGKey(0)

    # 1. Attention
    batch, seq, heads, hdim = 16, 1024, 16, 64
    x_attn = jax.random.normal(key, (batch, seq, heads * hdim))
    mha = attention.MultiHeadAttention(num_heads=heads, head_dim=hdim)
    params_attn = mha.init(key, x_attn, None)

    @jax.jit
    def attn_fn(p, x):
        return mha.apply(p, x, None)

    benchmark_component("MultiHeadAttention (Flash)", attn_fn, params_attn, x_attn)

    # 2. RMSNorm
    x_norm = jax.random.normal(key, (batch, 128, 128, 64))
    norm = normalization.NormalizationLayer(
        normalization_method=normalization.NormalizationType.RMS_NORM,
        conditional=False,
    )
    params_norm = norm.init(key, x_norm)

    @jax.jit
    def norm_fn(p, x):
        return norm.apply(p, x)

    benchmark_component("RMSNorm (Fused)", norm_fn, params_norm, x_norm)

    # 3. DiT Block
    x_dit = jax.random.normal(key, (batch, 256, 512))
    cond = jax.random.normal(key, (batch, 512))
    dit = dit_blocks.DiTBlockAdaLNZero(hidden_size=512, num_heads=8)
    params_dit = dit.init(key, x_dit, cond, is_training=True)

    @jax.jit
    def dit_fn(p, x, c):
        return dit.apply(p, x, c, is_training=True)

    benchmark_component("DiT Block (Optimized)", dit_fn, params_dit, x_dit, cond)

    print("-" * 50)
    print("Benchmark Complete.")

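# Note: each timed call pays per-call Python dispatch overhead. For
# sub-millisecond kernels, a tighter estimate could time a single jitted loop
# (e.g. built with `jax.lax.fori_loop`) and divide by the iteration count;
# this script keeps the simpler per-call loop.
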
if __name__ == "__main__":
    run_all()