System Instructions
View Original → أصبحت الآن معاينة Gemini 3.1 Flash-Lite متاحة. جرِّب الميزة في AI Studio.
الصفحة الرئيسية
Gemini API
المستندات
إرسال ملاحظات
إنشاء النص
يمكن لواجهة Gemini API إنشاء نصوص من النصوص والصور والفيديوهات والملفات الصوتية التي يتم إدخالها.
في ما يلي مثال أساسي:
Python
from google import genai

# The client reads GEMINI_API_KEY from the environment by default.
client = genai.Client()

prompt = "How does AI work?"
result = client.models.generate_content(
    model="gemini-3-flash-preview",
    contents=prompt,
)
print(result.text)
JavaScript
import { GoogleGenAI } from "@google/genai";

// The client reads GEMINI_API_KEY from the environment by default.
const ai = new GoogleGenAI({});

async function main() {
  const result = await ai.models.generateContent({
    model: "gemini-3-flash-preview",
    contents: "How does AI work?",
  });
  console.log(result.text);
}

await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-3-flash-preview",
genai.Text("Explain how AI works in a few words"),
nil,
)
fmt.Println(result.Text())
}
Java
import com.google.genai.Client;
import com.google.genai.types.GenerateContentResponse;

/** Minimal text-in / text-out example for the Gemini API. */
public class GenerateContentWithTextInput {
  public static void main(String[] args) {
    // The client reads GEMINI_API_KEY from the environment by default.
    Client client = new Client();
    GenerateContentResponse reply =
        client.models.generateContent("gemini-3-flash-preview", "How does AI work?", null);
    System.out.println(reply.text());
  }
}
REST
# Basic text-generation request. GEMINI_API_KEY must be set in the environment.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-3-flash-preview:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"parts": [
{
"text": "How does AI work?"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // Same prompt as the other language samples (fixed typo: was 'How AI does work?').
  const payload = {
    contents: [
      {
        parts: [
          { text: 'How does AI work?' },
        ],
      },
    ],
  };

  const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-3-flash-preview:generateContent';
  const options = {
    method: 'POST',
    contentType: 'application/json',
    headers: {
      'x-goog-api-key': apiKey,
    },
    payload: JSON.stringify(payload),
  };

  const response = UrlFetchApp.fetch(url, options);
  // Parse the HTTP body explicitly rather than relying on the
  // HTTPResponse object's implicit toString() coercion.
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}
التفكير باستخدام Gemini
تتضمّن نماذج Gemini غالبًا ميزة "التفكير" مفعّلة تلقائيًا، ما يتيح للنموذج الاستدلال قبل الرد على الطلب.
يتوافق كل نموذج مع إعدادات تفكير مختلفة تمنحك التحكّم في التكلفة ووقت الاستجابة والذكاء. لمزيد من التفاصيل، اطّلِع على
دليل التفكير.
Python
from google import genai
from google.genai import types

client = genai.Client()

# Constrain thinking to the "low" level to trade reasoning depth
# for lower latency and cost.
thinking = types.ThinkingConfig(thinking_level="low")
config = types.GenerateContentConfig(thinking_config=thinking)

reply = client.models.generate_content(
    model="gemini-3-flash-preview",
    contents="How does AI work?",
    config=config,
)
print(reply.text)
JavaScript
import { GoogleGenAI, ThinkingLevel } from "@google/genai";

const ai = new GoogleGenAI({});

async function main() {
  // LOW thinking trades reasoning depth for lower latency and cost.
  const config = {
    thinkingConfig: { thinkingLevel: ThinkingLevel.LOW },
  };
  const result = await ai.models.generateContent({
    model: "gemini-3-flash-preview",
    contents: "How does AI work?",
    config,
  });
  console.log(result.text);
}

await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
thinkingLevelVal := "low"
result, _ := client.Models.GenerateContent(
ctx,
"gemini-3-flash-preview",
genai.Text("How does AI work?"),
&genai.GenerateContentConfig{
ThinkingConfig: &genai.ThinkingConfig{
ThinkingLevel: &thinkingLevelVal,
},
}
)
fmt.Println(result.Text())
}
Java
import com.google.genai.Client;
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.ThinkingConfig;
import com.google.genai.types.ThinkingLevel;

/** Text generation with an explicit "low" thinking level. */
public class GenerateContentWithThinkingConfig {
  public static void main(String[] args) {
    // The client reads GEMINI_API_KEY from the environment by default.
    Client client = new Client();
    // Finish the inner builder with .build(); the original passed the
    // unfinished ThinkingConfig.Builder to thinkingConfig(...).
    ThinkingConfig thinkingConfig =
        ThinkingConfig.builder().thinkingLevel(new ThinkingLevel("low")).build();
    GenerateContentConfig config =
        GenerateContentConfig.builder().thinkingConfig(thinkingConfig).build();
    GenerateContentResponse response =
        client.models.generateContent("gemini-3-flash-preview", "How does AI work?", config);
    System.out.println(response.text());
  }
}
REST
# Text-generation request with thinking constrained to the "low" level.
# GEMINI_API_KEY must be set in the environment.
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-3-flash-preview:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"parts": [
{
"text": "How does AI work?"
}
]
}
],
"generationConfig": {
"thinkingConfig": {
"thinkingLevel": "low"
}
}
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');

function main() {
  // Same prompt as the other language samples (fixed typo: was 'How AI does work?').
  const payload = {
    contents: [
      {
        parts: [
          { text: 'How does AI work?' },
        ],
      },
    ],
    // Constrain thinking to the "low" level to reduce latency/cost.
    generationConfig: {
      thinkingConfig: {
        thinkingLevel: 'low',
      },
    },
  };

  const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-3-flash-preview:generateContent';
  const options = {
    method: 'POST',
    contentType: 'application/json',
    headers: {
      'x-goog-api-key': apiKey,
    },
    payload: JSON.stringify(payload),
  };

  const response = UrlFetchApp.fetch(url, options);
  // Parse the HTTP body explicitly rather than relying on the
  // HTTPResponse object's implicit toString() coercion.
  const data = JSON.parse(response.getContentText());
  const content = data['candidates'][0]['content']['parts'][0]['text'];
  console.log(content);
}
تعليمات النظام والإعدادات الأخرى
يمكنك توجيه سلوك نماذج Gemini باستخدام تعليمات النظام. لإجراء ذلك، مرِّر عنصر GenerateContentConfig.
Python
from google import genai
from google.genai import types

client = genai.Client()

# A system instruction steers the model's persona for the whole request.
config = types.GenerateContentConfig(
    system_instruction="You are a cat. Your name is Neko."
)
result = client.models.generate_content(
    model="gemini-3-flash-preview",
    contents="Hello there",
    config=config,
)
print(result.text)
JavaScript
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({});

async function main() {
  // A system instruction steers the model's persona for the whole request.
  const config = {
    systemInstruction: "You are a cat. Your name is Neko.",
  };
  const result = await ai.models.generateContent({
    model: "gemini-3-flash-preview",
    contents: "Hello there",
    config,
  });
  console.log(result.text);
}

await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
config := &genai.GenerateContentConfig{
SystemInstruction: genai.NewContentFromText("You are a cat. Your name is Neko.", genai.RoleUser),
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-3-flash-preview",
genai.Text("Hello there"),
config,
)
fmt.Println(result.Text())
}
Java
import com.google.genai.Client;
import com.google.genai.types.Content;
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.Part;

/** Text generation with a system instruction that sets the model's persona. */
public class GenerateContentWithSystemInstruction {
  public static void main(String[] args) {
    // The client reads GEMINI_API_KEY from the environment by default.
    Client client = new Client();
    Content persona = Content.fromParts(Part.fromText("You are a cat. Your name is Neko."));
    GenerateContentConfig config =
        GenerateContentConfig.builder().systemInstruction(persona).build();
    GenerateContentResponse reply =
        client.models.generateContent("gemini-3-flash-preview", "Hello there", config);
    System.out.println(reply.text());
  }
}
REST
``
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-3-flash-preview:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-d '{
"system_instruction": {
"parts": [
{"text": "You are a cat. Your name is Neko."
} ]},
"contents": [
{"parts": [
{"text": "Hello there"}
]}
]
}'
Related Articles
StruQ: Defending Against Prompt Injection with Structured Queries
Recent advances in Large Language Models (LLMs) enable exciting LLM-integrated applications, which perform text-based tasks by utilizing their advanced language understanding capabilities. However,...
How to work with large language models
[Large language models][Large language models Blog Post] are functions that map text to text. Given an input string of text, a large language model predicts the text that should come next.
Techniques to improve reliability
When GPT-3 fails on a task, what should you do?
Related resources from around the web
People are writing great tools and papers for improving outputs from GPT. Here are some cool ones we've seen: