File size: 13,878 Bytes
d8a562e
e9f53f0
 
f236326
 
5ce3bc6
 
 
 
 
e9f53f0
 
 
 
 
 
 
 
bb4818f
 
 
e9f53f0
d8a562e
 
e1dae5e
 
 
 
 
 
 
d23d1fc
e1dae5e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5ce3bc6
e1dae5e
 
5ce3bc6
e1dae5e
 
5ce3bc6
e1dae5e
 
 
 
 
e9f53f0
e1dae5e
 
 
5ce3bc6
e9f53f0
bb4818f
e9f53f0
 
bb4818f
 
e9f53f0
bb4818f
e9f53f0
 
bb4818f
e9f53f0
bb4818f
e9f53f0
 
 
bb4818f
e1dae5e
e9f53f0
 
 
 
d23d1fc
 
e1dae5e
 
 
5ce3bc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d23d1fc
 
5ce3bc6
 
 
 
d23d1fc
5ce3bc6
 
 
 
 
 
 
 
 
 
 
 
d23d1fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f236326
 
 
 
 
e1dae5e
5ce3bc6
 
e1dae5e
 
 
 
 
 
5ce3bc6
bb4818f
d23d1fc
5ce3bc6
 
 
bb4818f
 
 
 
5ce3bc6
bb4818f
 
 
 
 
 
d23d1fc
bb4818f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e1dae5e
bb4818f
 
 
 
 
e1dae5e
d23d1fc
e1dae5e
 
 
d23d1fc
e1dae5e
d23d1fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb4818f
 
 
e1dae5e
f236326
 
 
 
 
 
 
e1dae5e
bb4818f
 
f236326
 
e1dae5e
bb4818f
 
f236326
 
 
 
 
 
 
e1dae5e
f236326
 
 
 
 
bb4818f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e1dae5e
9c47a26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb4818f
 
 
d23d1fc
9c47a26
5ce3bc6
 
bb4818f
 
f236326
 
 
5ce3bc6
 
bb4818f
 
 
 
 
 
 
5ce3bc6
bb4818f
 
 
 
d23d1fc
 
 
 
 
bb4818f
d23d1fc
 
bb4818f
 
d23d1fc
 
 
 
bb4818f
 
e1dae5e
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
import gradio as gr
from pint import UnitRegistry

from data.mixes import MIXES, find_mix

from ecologits.tracers.utils import compute_llm_impacts, _avg
from ecologits.impacts.llm import compute_llm_impacts as compute_llm_impacts_expert
from ecologits.impacts.llm import IF_ELECTRICITY_MIX_GWP, IF_ELECTRICITY_MIX_ADPE, IF_ELECTRICITY_MIX_PE
from ecologits.model_repository import models

# Unit registry used to format the impact indicators and their equivalents.
u = UnitRegistry()

# Custom units/aliases needed by the calculator: impact units map onto their
# physical base unit so pint can scale and compare them.
_UNIT_DEFINITIONS = [
    'kWh = kilowatt_hour',
    'Wh = watt_hour',
    'gCO2eq = gram',
    'kgCO2eq = kilogram',
    'kgSbeq = kilogram',
    'MJ = megajoule',
    'kJ = kilojoule',
    'm = meter',
    'km = kilometer',
    'episodes = number of episodes',
]
for _definition in _UNIT_DEFINITIONS:
    u.define(_definition)

# Shorthand for building quantities, e.g. q(1, "kWh").
q = u.Quantity


# Models selectable in both calculator dropdowns, as (display label,
# "provider/model-id") pairs. The value is later split on the FIRST "/" to
# recover the provider (see `form`), so the model-id part may itself contain
# slashes (e.g. Hugging Face repo paths).
MODELS = [
    ("OpenAI / GPT-3.5-Turbo", "openai/gpt-3.5-turbo"),
    ("OpenAI / GPT-4", "openai/gpt-4"),
    ("Anthropic / Claude 3 Opus", "anthropic/claude-3-opus-20240229"),
    ("Anthropic / Claude 3 Sonnet", "anthropic/claude-3-sonnet-20240229"),
    ("Anthropic / Claude 3 Haiku", "anthropic/claude-3-haiku-20240307"),
    ("Anthropic / Claude 2.1", "anthropic/claude-2.1"),
    ("Anthropic / Claude 2.0", "anthropic/claude-2.0"),
    ("Anthropic / Claude Instant 1.2", "anthropic/claude-instant-1.2"),
    ("Mistral AI / Mistral 7B", "mistralai/open-mistral-7b"),
    ("Mistral AI / Mixtral 8x7B", "mistralai/open-mixtral-8x7b"),
    ("Mistral AI / Mixtral 8x22B", "mistralai/open-mixtral-8x22b"),
    ("Mistral AI / Tiny", "mistralai/mistral-tiny-2312"),
    ("Mistral AI / Small", "mistralai/mistral-small-2402"),
    ("Mistral AI / Medium", "mistralai/mistral-medium-2312"),
    ("Mistral AI / Large", "mistralai/mistral-large-2402"),
    ("Meta / Llama 3 8B", "huggingface_hub/meta-llama/Meta-Llama-3-8B"),
    ("Meta / Llama 3 70B", "huggingface_hub/meta-llama/Meta-Llama-3-70B"),
    ("Meta / Llama 2 7B", "huggingface_hub/meta-llama/Llama-2-7b-hf"),
    ("Meta / Llama 2 13B", "huggingface_hub/meta-llama/Llama-2-13b-hf"),
    ("Meta / Llama 2 70B", "huggingface_hub/meta-llama/Llama-2-70b-hf"),
    ("Cohere / Command Light", "cohere/command-light"),
    ("Cohere / Command", "cohere/command"),
    ("Cohere / Command R", "cohere/command-r"),
    ("Cohere / Command R+", "cohere/command-r-plus"),
]

# Example prompt presets as (label, expected output-token count) pairs. The
# token count is appended to the label shown in the dropdown and doubles as
# the dropdown value.
_PROMPT_PRESETS = [
    ("Write a Tweet", 50),
    ("Write an email", 170),
    ("Write an article summary", 250),
    ("Small conversation with a chatbot", 400),
    ("Write a report of 5 pages", 5000),
]
PROMPTS = [
    (f"{label} ({tokens} output tokens)", tokens)
    for label, tokens in _PROMPT_PRESETS
]


def format_indicator(name: str, value: str, unit: str) -> str:
    """Return a Markdown snippet rendering one impact indicator.

    Args:
        name: Indicator title (may contain an emoji), rendered as a heading.
        value: Pre-formatted magnitude to display.
        unit: Unit string shown after the value.

    Returns:
        Markdown containing a ``$$ ... $$`` LaTeX block, meant for a
        ``gr.Markdown`` component configured with those delimiters.
    """
    # Raw f-string: \LARGE, "\ " and \large are LaTeX commands, not Python
    # escapes. The original non-raw string relied on invalid escape sequences
    # being passed through (a SyntaxWarning in recent CPython, slated to
    # become an error); output is byte-identical.
    return rf"""
    ## {name}
    $$ \LARGE {value} \ \large {unit} $$
    """


def form_output(impacts):
    """Convert an EcoLogits impacts object into six Markdown indicators.

    Each indicator is rescaled to a readable unit when small (Wh instead of
    kWh, gCO2eq instead of kgCO2eq, kJ instead of MJ), and two "equivalent"
    metrics are derived:

    - distance driven with a small electric car (~2 km per kWh),
    - episodes of streaming video (~0.032 kgCO2eq per episode).

    Returns:
        A 6-tuple of Markdown strings in the order expected by the Gradio
        outputs: energy, gwp, adpe, pe, eq_energy, eq_gwp.
    """
    energy_ = q(impacts.energy.value, impacts.energy.unit)
    eq_energy_ = q(impacts.energy.value * 2, 'km')  # ~2 km per kWh
    if energy_ < q("1 kWh"):
        energy_ = energy_.to("Wh")
        eq_energy_ = q(impacts.energy.value * 2000, 'm')

    gwp_ = q(impacts.gwp.value, impacts.gwp.unit)
    # ~0.032 kgCO2eq per episode of streaming (same formula whichever unit
    # the indicator itself is displayed in).
    eq_gwp_ = q(impacts.gwp.value / 0.032, 'episodes')
    if gwp_ < q("1 kgCO2eq"):
        # Fix: `.to()` takes a unit name; the original passed "1 gCO2eq".
        gwp_ = gwp_.to("gCO2eq")

    adpe_ = q(impacts.adpe.value, impacts.adpe.unit)

    pe_ = q(impacts.pe.value, impacts.pe.unit)
    if pe_ < q("1 MJ"):
        pe_ = pe_.to("kJ")

    return (
        format_indicator("⚡️ Energy", f"{energy_.magnitude:.3g}", energy_.units),
        format_indicator("🌍 GHG Emissions", f"{gwp_.magnitude:.3g}", gwp_.units),
        format_indicator("🪨 Abiotic Resources", f"{adpe_.magnitude:.3g}", adpe_.units),
        format_indicator("⛽️ Primary Energy", f"{pe_.magnitude:.3g}", pe_.units),
        format_indicator("🔋 Equivalent energy: distance with a small electric car", f"{eq_energy_.magnitude:.3g}", eq_energy_.units),
        format_indicator("🍰 Equivalent emissions for 1000 prompts: watching GoT in streaming", f"{eq_gwp_.magnitude:.3g}", eq_gwp_.units)
    )


def form(
    model_name: str,
    prompt_generated_tokens: int
):
    """Handle a submission from the simple ("Home") calculator tab.

    ``model_name`` is a "provider/model" identifier; only the first slash
    separates the provider, so the model id may itself contain slashes.
    Returns the six Markdown indicator strings produced by ``form_output``.
    """
    provider, name = model_name.split('/', 1)
    # request_latency is set very high so the energy estimate is not capped
    # by latency — presumably intentional; confirm against EcoLogits docs.
    impacts = compute_llm_impacts(
        provider=provider,
        model_name=name,
        output_token_count=prompt_generated_tokens,
        request_latency=100000,
    )
    return form_output(impacts)


def form_expert(
    model_active_params: float,
    model_total_params: float,
    prompt_generated_tokens: int,
    mix_gwp: float,
    mix_adpe: float,
    mix_pe: float
):
    """Handle a submission from the "Expert Mode" tab.

    Takes the raw model parameter counts, the number of output tokens and the
    three electricity-mix impact factors, and returns the six Markdown
    indicator strings produced by ``form_output``.
    """
    return form_output(
        compute_llm_impacts_expert(
            model_active_parameter_count=model_active_params,
            model_total_parameter_count=model_total_params,
            output_token_count=prompt_generated_tokens,
            request_latency=100000,
            if_electricity_mix_gwp=mix_gwp,
            if_electricity_mix_adpe=mix_adpe,
            if_electricity_mix_pe=mix_pe,
        )
    )


# Sentinel dropdown value meaning "the user typed their own numbers".
CUSTOM = "Custom"


def custom():
    """Return the sentinel used to flip a dropdown back to the Custom entry."""
    return CUSTOM


def model_active_params_fn(model_name: str, n_param: float):
    """Resolve the active-parameter count for the selected model.

    When the dropdown is on CUSTOM, keep the user-entered ``n_param``;
    otherwise look the model up in the EcoLogits model repository, taking the
    range midpoint when only a range is known.
    """
    if model_name == CUSTOM:
        return n_param
    provider, name = model_name.split('/', 1)
    model = models.find_model(provider=provider, model_name=name)
    return model.active_parameters or _avg(model.active_parameters_range)


def model_total_params_fn(model_name: str, n_param: float):
    """Resolve the total-parameter count for the selected model.

    Same logic as :func:`model_active_params_fn`, but for total parameters.
    """
    if model_name == CUSTOM:
        return n_param
    provider, name = model_name.split('/', 1)
    model = models.find_model(provider=provider, model_name=name)
    return model.total_parameters or _avg(model.total_parameters_range)


def mix_fn(country_code: str, mix_adpe: float, mix_pe: float, mix_gwp: float):
    """Resolve the electricity-mix impact factors for a location.

    Returns a tuple in the order of the Gradio change-event outputs,
    ``[mix_adpe, mix_pe, mix_gwp]``.

    Bug fix: the original CUSTOM branch returned ``(gwp, adpe, pe)``, which
    rotated the three number fields every time "Custom" was selected; the
    user-entered values must be passed through unchanged, in outputs order.
    """
    if country_code == CUSTOM:
        return mix_adpe, mix_pe, mix_gwp
    # find_mix is assumed to return values in the same (adpe, pe, gwp)
    # order as the outputs — TODO confirm against data.mixes.
    return find_mix(country_code)

# Shared LaTeX delimiter configuration for every indicator Markdown component
# (the original repeated this literal ten times).
_LATEX_DELIMITERS = [{"left": "$$", "right": "$$", "display": False}]

with gr.Blocks() as demo:

### TITLE
    gr.Markdown("""
    # 🌱 EcoLogits Calculator

    **EcoLogits** is a python library that tracks the **energy consumption** and **environmental footprint** of using 
    **generative AI** models through APIs.

    Read the documentation: 
    [ecologits.ai](https://ecologits.ai) | ⭐️ us on GitHub: [genai-impact/ecologits](https://github.com/genai-impact/ecologits) |
    ✅ Follow us on Linkedin: [GenAI Impact](https://www.linkedin.com/company/genai-impact/posts/?feedView=all) 
    """)

### SIMPLE CALCULATOR
    with gr.Tab("Home"):
        gr.Markdown(""" 
        ## 😊 Calculator
        """)

        with gr.Row():
            model = gr.Dropdown(
                MODELS,
                label="Model name",
                value="openai/gpt-3.5-turbo",
                filterable=True,
            )
            prompt = gr.Dropdown(
                PROMPTS,
                label="Example prompt",
                value=50
            )

        # The four impact indicators filled in by `form`.
        with gr.Row():
            energy = gr.Markdown(label="energy", latex_delimiters=_LATEX_DELIMITERS)
            gwp = gr.Markdown(label="gwp", latex_delimiters=_LATEX_DELIMITERS)
            adpe = gr.Markdown(label="adpe", latex_delimiters=_LATEX_DELIMITERS)
            pe = gr.Markdown(label="pe", latex_delimiters=_LATEX_DELIMITERS)

        gr.Markdown('---')

        # "Equivalent" metrics (electric-car distance, streaming episodes).
        with gr.Row():
            equivalent_1 = gr.Markdown(label="eq_energy", latex_delimiters=_LATEX_DELIMITERS)
            equivalent_2 = gr.Markdown(label="eq_gwp", latex_delimiters=_LATEX_DELIMITERS)

        submit_btn = gr.Button("Submit")
        submit_btn.click(fn=form, inputs=[model, prompt], outputs=[energy, gwp, adpe, pe, equivalent_1, equivalent_2])

### EXPERT CALCULATOR
    with gr.Tab("Expert Mode"):
        gr.Markdown(""" 
        ## 🤓 Expert mode
        """)
        model = gr.Dropdown(
            MODELS + [CUSTOM],
            label="Model name",
            value="openai/gpt-3.5-turbo",
            filterable=True,
            interactive=True
        )
        model_active_params = gr.Number(
            label="Number of millions of active parameters",
            value=45.0,
            interactive=True
        )
        model_total_params = gr.Number(
            label="Number of millions of total parameters",
            value=45.0,
            interactive=True
        )

        # Selecting a known model fills the parameter fields; editing either
        # parameter field flips the dropdown back to CUSTOM.
        model.change(fn=model_active_params_fn, inputs=[model, model_active_params], outputs=[model_active_params])
        model.change(fn=model_total_params_fn, inputs=[model, model_total_params], outputs=[model_total_params])
        model_active_params.input(fn=custom, outputs=[model])
        model_total_params.input(fn=custom, outputs=[model])

        tokens = gr.Number(
            label="Output tokens",
            value=100
        )

        mix = gr.Dropdown(
            MIXES + [CUSTOM],
            label="Location",
            value="WOR",
            filterable=True,
            interactive=True
        )
        mix_adpe = gr.Number(
            label="Electricity mix - Abiotic resources [kgSbeq / kWh]",
            value=IF_ELECTRICITY_MIX_ADPE,
            interactive=True
        )
        mix_pe = gr.Number(
            label="Electricity mix - Primary energy [MJ / kWh]",
            value=IF_ELECTRICITY_MIX_PE,
            interactive=True
        )
        mix_gwp = gr.Number(
            label="Electricity mix - GHG emissions [kgCO2eq / kWh]",
            value=IF_ELECTRICITY_MIX_GWP,
            interactive=True
        )

        # Same pattern as the model fields: picking a location fills the
        # factors; editing a factor flips the dropdown back to CUSTOM.
        mix.change(fn=mix_fn, inputs=[mix, mix_adpe, mix_pe, mix_gwp], outputs=[mix_adpe, mix_pe, mix_gwp])
        mix_adpe.input(fn=custom, outputs=mix)
        mix_pe.input(fn=custom, outputs=mix)
        mix_gwp.input(fn=custom, outputs=mix)

        # NOTE: the original built this indicators row TWICE; the first
        # copy's components were orphaned by the variable rebinding (never
        # wired to any output), leaving a permanently empty row in the UI.
        # Deduplicated to a single row.
        with gr.Row():
            energy = gr.Markdown(label="energy", latex_delimiters=_LATEX_DELIMITERS)
            gwp = gr.Markdown(label="gwp", latex_delimiters=_LATEX_DELIMITERS)
            adpe = gr.Markdown(label="adpe", latex_delimiters=_LATEX_DELIMITERS)
            pe = gr.Markdown(label="pe", latex_delimiters=_LATEX_DELIMITERS)

        gr.Markdown('---')

        with gr.Row():
            equivalent_1 = gr.Markdown(label="eq_energy", latex_delimiters=_LATEX_DELIMITERS)
            equivalent_2 = gr.Markdown(label="eq_gwp", latex_delimiters=_LATEX_DELIMITERS)

        submit_btn = gr.Button("Submit")
        submit_btn.click(
            fn=form_expert,
            inputs=[model_active_params, model_total_params, tokens, mix_gwp, mix_adpe, mix_pe],
            outputs=[energy, gwp, adpe, pe, equivalent_1, equivalent_2]
        )

### METHOD QUICK EXPLANATION
    with gr.Tab('Methodology'):
        gr.Markdown("""## 📖 Methodology
        🚧 Under construction
        """)

### INFORMATION ABOUT INDICATORS
    with gr.Accordion("📊 More about the indicators", open=False):
        gr.Markdown("""
        - ⚡️ **Energy**: Final energy consumption, 
        - 🌍 **GHG Emissions**: Potential impact on global warming (commonly known as GHG/carbon emissions), 
        - 🪨 **Abiotic Resources**: Impact on the depletion of non-living resources such as minerals or metals, 
        - ⛽️ **Primary Energy**: Total energy consumed from primary sources.
        """)

### INFORMATION ABOUT REDUCING IMPACTS
    with gr.Accordion("📉 How to reduce / limit these impacts ?", open=False):
        gr.Markdown("""
                    
        * ❓ **Fundamental rule**: Show **sobriety** on the uses of (generative) AI
            * Questionning the usefulness of the project;
            * Estimating impacts of the project;
            * Evaluating the project purpose;
            * Restricting the use case to the desired purposes.
        
        * 🦾 On the hardware side
            * If you can, try to relocate the computing in low emissions and/or energy efficient datacenters.
        
        * 🤖 On the ML side :
            * Develop a zero-shot learning approach for general tasks;
            * Prefer the smaller and yet well-peforming models (using number of parameters for example); 
            * If a specialization is needed, always prefer fine-tuning an existing model than re-training one from scratch;
            * During model inference, try caching the most popular prompts ("hey, tell me a joke about ...").
            
        """)

if __name__ == '__main__':
    # Start the Gradio server when the file is run as a script.
    demo.launch()