Current location: Home> Gemini Tutorial> How to generate text using the Gemini API

How to generate text using the Gemini API

Author: LoRA Time:

The Gemini API can generate text output based on a variety of inputs, including text, pictures, videos, and audio.

Before calling the Gemini API, make sure that you have installed the selected SDK and have the Gemini API key configured for use.

1. Text input

The easiest way to generate text using the Gemini API is to provide a single plain text input to the model, as shown in the following example:

Python

 # Minimal example: single-turn, plain-text generation with the Gemini API.
from google import genai

# Create a client authenticated with your Gemini API key.
gemini_client = genai.Client(api_key="GEMINI_API_KEY")

# Ask the model a question and print its text reply.
result = gemini_client.models.generate_content(
    model="gemini-2.0-flash",
    contents=["How does AI work?"],
)
print(result.text)

JavaScript

 // Minimal example: single-turn, plain-text generation with the Gemini API.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  // Send one prompt and log the model's text reply.
  const result = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: "How does AI work?",
  });
  console.log(result.text);
}

await main();

Go

 // import packages here
func main() {
	ctx := context.Background()
	// Create the client using the API key from the environment.
	client, err := genai.NewClient(ctx, option.WithAPIKey(os.Getenv("GEMINI_API_KEY")))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
	model := client.GenerativeModel("gemini-2.0-flash")
	resp, err := model.GenerateContent(ctx, genai.Text("How does AI work?"))
	if err != nil {
		log.Fatal(err)
	}
	printResponse(resp) // helper function for printing content parts
}

REST

 # NOTE: trailing backslashes restored — without them each flag runs as a
# separate (broken) shell command.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "contents": [
      {
        "parts": [
          {
            "text": "How does AI work?"
          }
        ]
      }
    ]
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // FIX: the prompt text was garbled ("How AI does work?"); it now matches
  // the other language samples.
  const payload = {
    contents: [
      {
        parts: [
          { text: 'How does AI work?' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // Parse the response body explicitly instead of relying on the
  // HTTPResponse object's implicit toString().
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}

2. Image input

The Gemini API supports multimodal input that combines text and media files. The following example shows how to generate text based on text and image input:

Python

 # Multimodal prompt: pass a PIL image alongside text.
from PIL import Image
from google import genai

gemini_client = genai.Client(api_key="GEMINI_API_KEY")

# Load the image from disk; the SDK encodes it for the API.
organ_image = Image.open("/path/to/organ.png")

result = gemini_client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[organ_image, "Tell me about this instrument"],
)
print(result.text)

JavaScript

 // Multimodal prompt: generate text from a text-plus-image input.
import {
  GoogleGenAI,
  createUserContent,
  createPartFromUri,
} from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  // Upload the image through the Files API so it can be referenced by URI.
  const uploaded = await ai.files.upload({
    file: "/path/to/organ.png",
  });

  // Combine the question and the uploaded image into a single user turn.
  const result = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [
      createUserContent([
        "Tell me about this instrument",
        createPartFromUri(uploaded.uri, uploaded.mimeType),
      ]),
    ],
  });
  console.log(result.text);
}

await main();

Go

 // FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
model := client.GenerativeModel("gemini-2.0-flash")
imgData, err := os.ReadFile(filepath.Join(testDataDir, "organ.jpg"))
if err != nil {
	log.Fatal(err)
}
// Send the question and the raw JPEG bytes as one multimodal prompt.
resp, err := model.GenerateContent(ctx,
	genai.Text("Tell me about this instrument"),
	genai.ImageData("jpeg", imgData))
if err != nil {
	log.Fatal(err)
}
printResponse(resp)

REST

 # Use a temporary file to hold the base64 encoded image data
TEMP_B64=$(mktemp)
# Use a temporary file to hold the JSON payload
TEMP_JSON=$(mktemp)
# FIX: a second `trap ... EXIT` replaces the first, which leaked $TEMP_B64;
# install a single handler that removes both files.
trap 'rm -f "$TEMP_B64" "$TEMP_JSON"' EXIT
base64 $B64FLAGS $IMG_PATH > "$TEMP_B64"
cat > "$TEMP_JSON" << EOF
{
 "contents": [
   {
     "parts": [
       {
         "text": "Tell me about this instrument"
       },
       {
         "inline_data": {
           "mime_type": "image/jpeg",
           "data": "$(cat "$TEMP_B64")"
         }
       }
     ]
   }
 ]
}
EOF
# NOTE: trailing backslashes restored so the multi-line command parses.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d "@$TEMP_JSON"

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  const imageUrl = 'http://image/url';
  const image = getImageData(imageUrl);
  const payload = {
    contents: [
      {
        parts: [
          // FIX: image bytes must be sent as an inlineData part; a bare
          // `{ image }` key is not a valid Part field and the request fails.
          { inlineData: image },
          { text: 'Tell me about this instrument' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // Parse the response body explicitly.
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}

// Fetches an image and returns it in the { mimeType, data } shape
// expected by an inlineData part.
function getImageData(url) {
  const blob = UrlFetchApp.fetch(url).getBlob();
  return {
    mimeType: blob.getContentType(),
    data: Utilities.base64Encode(blob.getBytes())
  };
}

3. Streaming output

By default, the model returns an answer after completing the entire text generation process. You can use streaming to return GenerateContentResponse instances when they are generated, enabling faster interactions.

Python

 # Streaming: chunks are printed as soon as the model produces them.
from google import genai

gemini_client = genai.Client(api_key="GEMINI_API_KEY")

stream = gemini_client.models.generate_content_stream(
    model="gemini-2.0-flash",
    contents=["Explain how AI works"],
)
for chunk in stream:
    print(chunk.text, end="")

JavaScript

 // Streaming: log each chunk instead of waiting for the complete reply.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  const stream = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: "Explain how AI works",
  });
  // Each chunk arrives as soon as the model generates it.
  for await (const chunk of stream) {
    console.log(chunk.text);
  }
}

await main();

Go

 // FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
model := client.GenerativeModel("gemini-1.5-flash")
iter := model.GenerateContentStream(ctx, genai.Text("Write a story about a magic backpack."))
for {
	resp, err := iter.Next()
	// iterator.Done signals the end of the stream.
	if err == iterator.Done {
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	printResponse(resp)
}

REST

 # NOTE: trailing backslashes restored so the multi-line command parses.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=${GEMINI_API_KEY}" \
  -H 'Content-Type: application/json' \
  --no-buffer \
  -d '{
    "contents": [
      {
        "parts": [
          {
            "text": "Explain how AI works"
          }
        ]
      }
    ]
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  const payload = {
    contents: [
      {
        parts: [
          { text: 'Explain how AI works' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // FIX: streamGenerateContent (without alt=sse) returns a JSON *array* of
  // response chunks, not a single object; join the text of every chunk.
  const chunks = JSON.parse(response.getContentText());
  const content = chunks
    .map((chunk) => chunk['candidates'][0]['content']['parts'][0]['text'])
    .join('');
  console.log(content);
}

4. Multi-turn conversations (chat)

With the Gemini SDK, you can collect multiple rounds of questions and answers into one conversation. With the chat format, users can get answers step by step and get help when encountering multi-part questions. This SDK chat implementation provides an interface to track conversation history, but in the background, it uses the same generateContent method to create a response.

The following code example shows the implementation of the basic chat function:

Python

 # Multi-turn chat: the SDK tracks the conversation history for you.
from google import genai

gemini_client = genai.Client(api_key="GEMINI_API_KEY")
chat = gemini_client.chats.create(model="gemini-2.0-flash")

# Two sequential turns; each reply is printed as it arrives.
print(chat.send_message("I have 2 dogs in my house.").text)
print(chat.send_message("How many paws are in my house?").text)

# Replay the accumulated history, one "role: text" line per message.
for message in chat.get_history():
    print(f'role - {message.role}', end=": ")
    print(message.parts[0].text)

JavaScript

 // Multi-turn chat: the SDK tracks history and replays it on each sendMessage.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  // Seed the conversation with a prior user/model exchange.
  const chat = ai.chats.create({
    model: "gemini-2.0-flash",
    history: [
      { role: "user", parts: [{ text: "Hello" }] },
      {
        role: "model",
        parts: [{ text: "Great to meet you. What would you like to know?" }],
      },
    ],
  });

  const first = await chat.sendMessage({
    message: "I have 2 dogs in my house.",
  });
  console.log("Chat response 1:", first.text);

  const second = await chat.sendMessage({
    message: "How many paws are in my house?",
  });
  console.log("Chat response 2:", second.text);
}

await main();

Go

 // FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
model := client.GenerativeModel("gemini-1.5-flash")
cs := model.StartChat()
// Seed the chat with a prior user/model exchange.
cs.History = []*genai.Content{
	{
		Parts: []genai.Part{
			genai.Text("Hello, I have 2 dogs in my house."),
		},
		Role: "user",
	},
	{
		Parts: []genai.Part{
			genai.Text("Great to meet you. What would you like to know?"),
		},
		Role: "model",
	},
}
res, err := cs.SendMessage(ctx, genai.Text("How many paws are in my house?"))
if err != nil {
	log.Fatal(err)
}
printResponse(res)

REST

 # NOTE: URL quoted (it contains `?`) and trailing backslashes restored.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "contents": [
      {
        "role": "user",
        "parts": [
          {
            "text": "Hello"
          }
        ]
      },
      {
        "role": "model",
        "parts": [
          {
            "text": "Great to meet you. What would you like to know?"
          }
        ]
      },
      {
        "role": "user",
        "parts": [
          {
            "text": "I have two dogs in my house. How many paws are in my house?"
          }
        ]
      }
    ]
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // Multi-turn conversation: the full history is sent in one request.
  const payload = {
    contents: [
      {
        role: 'user',
        parts: [
          { text: 'Hello' },
        ],
      },
      {
        role: 'model',
        parts: [
          { text: 'Great to meet you. What would you like to know?' },
        ],
      },
      {
        role: 'user',
        parts: [
          { text: 'I have two dogs in my house. How many paws are in my house?' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // Parse the response body explicitly instead of relying on the
  // HTTPResponse object's implicit toString().
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}

You can also use streaming with chat functionality, as shown in the following example:

Python

 # Streaming combined with chat: each reply is printed chunk by chunk.
from google import genai

gemini_client = genai.Client(api_key="GEMINI_API_KEY")
chat = gemini_client.chats.create(model="gemini-2.0-flash")

for chunk in chat.send_message_stream("I have 2 dogs in my house."):
    print(chunk.text, end="")
for chunk in chat.send_message_stream("How many paws are in my house?"):
    print(chunk.text, end="")

# Dump the recorded history after both turns.
for message in chat.get_history():
    print(f'role - {message.role}', end=": ")
    print(message.parts[0].text)

JavaScript

 // Streaming variant of multi-turn chat.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

// Log every chunk of a streamed reply, each followed by a rule line.
async function logStream(stream) {
  for await (const chunk of stream) {
    console.log(chunk.text);
    console.log("_".repeat(80));
  }
}

async function main() {
  // Seed the conversation with a prior user/model exchange.
  const chat = ai.chats.create({
    model: "gemini-2.0-flash",
    history: [
      { role: "user", parts: [{ text: "Hello" }] },
      {
        role: "model",
        parts: [{ text: "Great to meet you. What would you like to know?" }],
      },
    ],
  });

  await logStream(
    await chat.sendMessageStream({ message: "I have 2 dogs in my house." })
  );
  await logStream(
    await chat.sendMessageStream({ message: "How many paws are in my house?" })
  );
}

await main();

Go

 // FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
model := client.GenerativeModel("gemini-1.5-flash")
cs := model.StartChat()
// Seed the chat with a prior user/model exchange.
cs.History = []*genai.Content{
	{
		Parts: []genai.Part{
			genai.Text("Hello, I have 2 dogs in my house."),
		},
		Role: "user",
	},
	{
		Parts: []genai.Part{
			genai.Text("Great to meet you. What would you like to know?"),
		},
		Role: "model",
	},
}
// Stream the reply; iterator.Done marks the end of the stream.
iter := cs.SendMessageStream(ctx, genai.Text("How many paws are in my house?"))
for {
	resp, err := iter.Next()
	if err == iterator.Done {
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	printResponse(resp)
}

REST

 # FIX: the URL must be quoted — the unquoted `&` backgrounded the command
# and dropped the key parameter. Trailing backslashes also restored.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "contents": [
      {
        "role": "user",
        "parts": [
          {
            "text": "Hello"
          }
        ]
      },
      {
        "role": "model",
        "parts": [
          {
            "text": "Great to meet you. What would you like to know?"
          }
        ]
      },
      {
        "role": "user",
        "parts": [
          {
            "text": "I have two dogs in my house. How many paws are in my house?"
          }
        ]
      }
    ]
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // Multi-turn conversation sent to the streaming endpoint.
  const payload = {
    contents: [
      {
        role: 'user',
        parts: [
          { text: 'Hello' },
        ],
      },
      {
        role: 'model',
        parts: [
          { text: 'Great to meet you. What would you like to know?' },
        ],
      },
      {
        role: 'user',
        parts: [
          { text: 'I have two dogs in my house. How many paws are in my house?' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // FIX: streamGenerateContent (without alt=sse) returns a JSON *array* of
  // response chunks, not a single object; join the text of every chunk.
  const chunks = JSON.parse(response.getContentText());
  const content = chunks
    .map((chunk) => chunk['candidates'][0]['content']['parts'][0]['text'])
    .join('');
  console.log(content);
}

5. Configuration parameters

Each prompt you send to the model contains parameters that control how the model generates an answer. You can configure these parameters or let the model use default options.

The following example shows how to configure model parameters:

Python

 # Tune generation with GenerateContentConfig (length cap + low temperature).
from google import genai
from google.genai import types

gemini_client = genai.Client(api_key="GEMINI_API_KEY")

generation_config = types.GenerateContentConfig(
    max_output_tokens=500,  # upper bound on reply length
    temperature=0.1,        # low randomness: near-deterministic output
)
result = gemini_client.models.generate_content(
    model="gemini-2.0-flash",
    contents=["Explain how AI works"],
    config=generation_config,
)
print(result.text)

JavaScript

 // Constrain generation via config: cap output length and lower randomness.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  const result = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: "Explain how AI works",
    config: {
      maxOutputTokens: 500, // upper bound on reply length
      temperature: 0.1,     // low randomness: near-deterministic output
    },
  });
  console.log(result.text);
}

await main();

Go

 // FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
model := client.GenerativeModel("gemini-1.5-pro-latest")
// Generation parameters are set on the model handle before the call.
model.SetTemperature(0.9)
model.SetTopP(0.5)
model.SetTopK(20)
model.SetMaxOutputTokens(100)
model.SystemInstruction = genai.NewUserContent(genai.Text("You are Yoda from Star Wars."))
model.ResponseMIMEType = "application/json"
resp, err := model.GenerateContent(ctx, genai.Text("What is the average size of a swallow?"))
if err != nil {
	log.Fatal(err)
}
printResponse(resp)

REST

 # NOTE: URL quoted (it contains `?`) and trailing backslashes restored.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "contents": [
      {
        "parts": [
          {
            "text": "Explain how AI works"
          }
        ]
      }
    ],
    "generationConfig": {
      "stopSequences": [
        "Title"
      ],
      "temperature": 1.0,
      "maxOutputTokens": 800,
      "topP": 0.8,
      "topK": 10
    }
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // Generation parameters travel in the request body under generationConfig.
  const generationConfig = {
    temperature: 1,
    topP: 0.95,
    topK: 40,
    maxOutputTokens: 8192,
    responseMimeType: 'text/plain',
  };
  const payload = {
    generationConfig,
    contents: [
      {
        parts: [
          { text: 'Explain how AI works in a few words' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // Parse the response body explicitly instead of relying on the
  // HTTPResponse object's implicit toString().
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}

Here are some model parameters that you can configure. (Name conventions vary by programming language.)

1.stopSequences: Specifies a set of character sequences (up to 5) that will stop output generation. If specified, the API stops at the first appearance of a stop sequence. The stop sequence is not included in the response.

2.temperature: Controls the randomness of the output. Use higher values for more creative responses and lower values for more deterministic responses. Values can range over [0.0, 2.0].

3.maxOutputTokens: Sets the maximum number of tokens to include in a candidate response.

4.topP: Changes how the model selects tokens for output. Tokens are selected from the most to least probable until the sum of their probabilities equals the topP value. The default topP value is 0.95.

5.topK: Changes how the model selects tokens for output. A topK of 1 means the selected token is the most probable among all the tokens in the model's vocabulary, while a topK of 3 means the next token is selected from among the 3 most probable tokens (using temperature). Tokens are further filtered based on topP, with the final token selected using temperature sampling.

6. System instructions

With system instructions, you can steer the behavior of a model for a specific use case. A system instruction gives the model additional context to help it understand the task and generate more tailored responses. The model follows system instructions across the full interaction with the user, which lets you specify product-level behavior independently of the prompts provided by end users.

You can set up system instructions when initializing the model:

Python

 # Steer the model's persona with a system instruction.
from google import genai
from google.genai import types

gemini_client = genai.Client(api_key="GEMINI_API_KEY")

result = gemini_client.models.generate_content(
    model="gemini-2.0-flash",
    config=types.GenerateContentConfig(
        system_instruction="You are a cat. Your name is Neko."),
    contents="Hello there",
)
print(result.text)

JavaScript

 // Steer model behavior with a system instruction supplied in config.
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: "GEMINI_API_KEY" });

async function main() {
  const result = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: "Hello there",
    config: {
      systemInstruction: "You are a cat. Your name is Neko.",
    },
  });
  console.log(result.text);
}

await main();

Go

 // import packages here
func main() {
	ctx := context.Background()
	client, err := genai.NewClient(ctx, option.WithAPIKey(os.Getenv("GEMINI_API_KEY")))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// FIX: the Go SDK method is GenerativeModel, not GeneratedModel.
	model := client.GenerativeModel("gemini-2.0-flash")
	// The system instruction applies to every request made on this handle.
	model.SystemInstruction = &genai.Content{
		Parts: []genai.Part{genai.Text(`
      You are a cat. Your name is Neko.
    `)},
	}
	resp, err := model.GenerateContent(ctx, genai.Text("Hello there"))
	if err != nil {
		log.Fatal(err)
	}
	printResponse(resp) // helper function for printing content parts
}

REST

 # NOTE: trailing backslashes restored so the multi-line command parses.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
  -H 'Content-Type: application/json' \
  -d '{
    "system_instruction": {
      "parts": [
        {
          "text": "You are a cat. Your name is Neko."
        }
      ]
    },
    "contents": [
      {
        "parts": [
          {
            "text": "Hello there"
          }
        ]
      }
    ]
  }'

Apps Scripts

 // See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // The system instruction travels as a top-level field of the request body.
  const systemInstruction = {
    parts: [{
      text: 'You are a cat. Your name is Neko.'
    }]
  };
  const payload = {
    systemInstruction,
    contents: [
      {
        parts: [
          { text: 'Hello there' },
        ],
      },
    ],
  };
  const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=${apiKey}`;
  const options = {
    method: 'POST',
    contentType: 'application/json',
    payload: JSON.stringify(payload)
  };
  const response = UrlFetchApp.fetch(url, options);
  // Parse the response body explicitly instead of relying on the
  // HTTPResponse object's implicit toString().
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}

You can then send requests to the model as usual.

7. Tips

All models in the Gemini family support text generation.

For basic text generation application scenarios, your question may not need to include any output examples, system descriptions, or format information. This is a zero-sample method. For some use cases, a single or few sample prompt may generate output that is more in line with the user's expectations. In some cases, you may also need to provide system instructions to help the model understand the task or follow specific guidelines.