{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "stable-diffusion.ipynb",
      "private_outputs": true,
      "provenance": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU",
    "gpuClass": "standard"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ruchira-net/css-loader/blob/master/stable_diffusion.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "%pip install --quiet --upgrade diffusers transformers accelerate mediapy peft"
      ],
      "metadata": {
        "id": "ufD_d64nr08H"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import mediapy as media\n",
        "import random\n",
        "import sys\n",
        "import torch\n",
        "\n",
        "from diffusers import DiffusionPipeline, TCDScheduler\n",
        "from huggingface_hub import hf_hub_download\n",
        "\n",
        "# Choose either 8 or 12 steps:\n",
        "num_inference_steps = 8\n",
        "\n",
        "base_model_id = \"stabilityai/stable-diffusion-xl-base-1.0\"\n",
        "repo_name = \"ByteDance/Hyper-SD\"\n",
        "plural = \"s\" if num_inference_steps > 1 else \"\"\n",
        "ckpt_name = f\"Hyper-SDXL-{num_inference_steps}step{plural}-CFG-lora.safetensors\"\n",
        "device = \"cuda\"\n",
        "\n",
        "pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant=\"fp16\").to(device)\n",
        "pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))\n",
        "pipe.fuse_lora()\n",
66+ " pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)"
67+ ],
68+ "metadata" : {
69+ "id" : " bG2hkmSEvByV"
70+ },
71+ "execution_count" : null ,
72+ "outputs" : []
73+ },
    {
      "cell_type": "code",
      "source": [
        "prompt = \"a photo of Pikachu fine dining with a view to the Eiffel Tower\"\n",
        "negative_prompt = \"text\"\n",
        "seed = random.randint(0, sys.maxsize)\n",
        "\n",
        "# Pick a value between 5.0 and 8.0:\n",
        "guidance_scale = 7\n",
        "\n",
        "# Decrease eta (min: 0, max: 1.0) to get more details with multi-step inference:\n",
        "eta = 0.5\n",
        "\n",
87+ " images = pipe(\n " ,
88+ " prompt = prompt,\n " ,
89+ " negative_prompt = negative_prompt,\n " ,
90+ " num_inference_steps = num_inference_steps,\n " ,
91+ " guidance_scale = guidance_scale,\n " ,
92+ " eta = eta,\n " ,
93+ " generator = torch.Generator(device).manual_seed(seed),\n " ,
94+ " ).images\n " ,
95+ " \n " ,
96+ " print(f\" Prompt:\\ t{prompt}\\ nSeed:\\ t{seed}\" )\n " ,
97+ " media.show_images(images)\n " ,
98+ " images[0].save(\" output.jpg\" )"
99+ ],
100+ "metadata" : {
101+ "id" : " AUc4QJfE-uR9"
102+ },
103+ "execution_count" : null ,
104+ "outputs" : []
105+ }
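    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Optional: a minimal sketch that reuses the pipeline and settings from the cells above to render a few seed variations of the same prompt. The `seeds` and `variations` names are illustrative."
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Minimal sketch: assumes `pipe`, `prompt`, and the sampling settings defined above are still loaded.\n",
        "seeds = [random.randint(0, sys.maxsize) for _ in range(3)]\n",
        "variations = []\n",
        "for s in seeds:\n",
        "    image = pipe(\n",
        "        prompt=prompt,\n",
        "        negative_prompt=negative_prompt,\n",
        "        num_inference_steps=num_inference_steps,\n",
        "        guidance_scale=guidance_scale,\n",
        "        eta=eta,\n",
        "        generator=torch.Generator(device).manual_seed(s),\n",
        "    ).images[0]\n",
        "    variations.append(image)\n",
        "\n",
        "print(\"Seeds:\", seeds)\n",
        "media.show_images(variations)"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    }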
  ]
}