
Google Vertex AI: Gemini: Chat
Google Vertex AI: Gemini: チャット
This item sends a message to a Gemini model on Google Vertex AI and stores the response in the specified data item.
Basic Configs
- Step Name
- Note
Configs for this Auto Step
- conf_Auth
- C1: Service Account Setting *
- conf_Region
- C2: Region (a region code or 'global') *
- conf_ProjectId
- C3: Project ID *
- conf_Model
- C4: Model *
- conf_MaxTokens
- C5: Maximum number of tokens for generating response
- conf_Temperature
- C6: Temperature (0.0 – 2.0)
- conf_StopSequences
- C7: Stop Sequences (write one per line, up to five)
- conf_Message1
- U1: User Message *#{EL}
- conf_Images1
- I1: Files to attach to user message
- conf_Images1Resolution
- I1-R: Resolution of the attached files
- conf_Thought1
- T1: Data item to save summary of thought
- conf_Answer1
- A1: Data item to save response *
Notes
- To set up [C1: Service Account Setting] :
- Prepare a service account on Google Cloud Console
- Role [Vertex AI User] is required
- Create or upload a service account key
- If you create a service account key on Google Cloud Console, you can download a JSON file including necessary information
- Create an OAuth2 JWT Bearer setting on Questetra BPM Suite and set it to C1
- Scope
- https://www.googleapis.com/auth/cloud-platform is required
- Set up other items as shown in the table below :
| Settings on Questetra BPM Suite | Corresponding Information of Google Cloud Service Account Key | Property Name in JSON File | Required or Not |
|---|---|---|---|
| Client ID | OAuth2 Client ID | client_id | Not Required |
| Private Key ID | Key ID | private_key_id | Required |
| Private Key | Private Key | private_key | Required |
| Custom Secret Information 1 | Email Address | client_email | Required |

Capture

See Also
Script (click to open)
- An XML file that contains the code below is available to download
- google-vertexai-gemini-chat.xml (C) Questetra, Inc. (MIT License)
- If you are using Professional, you can modify the contents of this file and use it as your own add-on auto step
const MAX_IMAGE_SIZE = 20971520; // Gemini's limit for inline files: up to 20 MB per file
/**
 * Entry point of this auto step.
 * Reads the step configuration, sends the user message (and any attached
 * files) to a Gemini model on Google Vertex AI, and stores the answer and
 * the thought summary in the configured data items.
 */
const main = () => {
    ////// == Config & Data Retrieving ==
    const auth = configs.getObject('conf_Auth');
    const region = retrieveRegion();
    const projectId = configs.get('conf_ProjectId');
    const model = retrieveModel();
    const maxTokens = retrieveMaxTokens();
    const thinkingLevel = configs.get('conf_ThinkingLevel');
    const temperature = retrieveTemperature();
    const stopSequences = retrieveStopSequences();
    const message = configs.get('conf_Message1');
    if (message === '') {
        throw new Error('User Message is empty.');
    }
    const inlineImages = retrieveImages();
    const mediaResolution = configs.get('conf_Images1Resolution');

    ////// == Calculating ==
    const result = invokeModel(
        auth,
        region,
        projectId,
        model,
        maxTokens,
        thinkingLevel,
        temperature,
        stopSequences,
        message,
        inlineImages,
        mediaResolution
    );

    ////// == Data Updating ==
    saveData('conf_Answer1', result.answer);
    saveData('conf_Thought1', result.thought);
};
/**
 * Reads the region code from the config.
 * Throws when the value is not in a valid region-code format.
 * @return {String} the validated region code, or 'global'
 */
const retrieveRegion = () => {
    const region = configs.get('conf_Region');
    // generous length limits so that future region names keep matching
    const validRegion = /^(?:global|[a-z]{2,20}-[a-z]{2,20}[1-9]{1,2})$/;
    if (!validRegion.test(region)) {
        throw new Error('Region Code is invalid.');
    }
    return region;
};
/**
 * Reads the model ID from the config.
 * Throws when the value contains a character that is not allowed in a model ID.
 * @return {String} the validated model ID
 */
const retrieveModel = () => {
    const model = configs.get('conf_Model');
    // model IDs consist of lowercase letters, digits, dots, and hyphens
    const validModel = /^[a-z0-9.-]+$/;
    if (!validModel.test(model)) {
        throw new Error('Model includes an invalid character.');
    }
    return model;
};
/**
 * Reads the maximum number of output tokens from the config.
 * @returns {Number|null} the maximum token count, or null when not set
 * @throws when the value is set but is not a positive integer
 */
const retrieveMaxTokens = () => {
    const maxTokens = configs.get('conf_MaxTokens');
    if (maxTokens === '') {
        return null;
    }
    // use the regex literal directly; wrapping it in new RegExp() was redundant
    if (!/^[1-9][0-9]*$/.test(maxTokens)) {
        throw new Error('Maximum number of tokens for generating output must be a positive integer.');
    }
    return parseInt(maxTokens, 10);
};
/**
 * Reads the temperature from the config.
 * Returns null when not set.
 * Throws when the value is set but is not a number from 0 to 2.
 * @returns {Number}
 */
const retrieveTemperature = () => {
    const value = configs.get('conf_Temperature');
    if (value === '') {
        return null;
    }
    // matches 0–1 with an optional fraction, or 2 / 2.0 / 2.00…
    const pattern = /^([0-1](\.\d+)?|2(\.0+)?)$/;
    if (!pattern.test(value)) {
        throw new Error('Temperature must be a number from 0 to 2.');
    }
    return parseFloat(value);
};
/**
 * Reads the stop sequences from the config.
 * Throws when more than 5 sequences are specified.
 * @returns {Array<String>} one entry per non-empty line (possibly empty)
 */
const retrieveStopSequences = () => {
    const raw = configs.get('conf_StopSequences');
    if (raw === '') {
        return [];
    }
    // one sequence per line; blank lines are ignored
    const sequences = raw.split('\n').filter((line) => line !== '');
    if (sequences.length > 5) {
        throw new Error('Too many stop sequences. The maximum number is 5.');
    }
    return sequences;
};
/**
 * Reads the attached files from the config and converts each of them to an
 * inline data object.
 * Throws when:
 * - a file exceeds the per-file size limit
 * - a file is not an image, video, audio, PDF, or text file
 * @returns {Array<Object>} array of inline data objects ({mimeType, data})
 */
const retrieveImages = () => {
    const filesDef = configs.getObject('conf_Images1');
    if (filesDef === null) {
        return [];
    }
    const files = engine.findData(filesDef);
    if (files === null) {
        return [];
    }
    const ACCEPTED_PREFIXES = ['image/', 'video/', 'audio/', 'application/pdf', 'text/'];
    const inlineImages = [];
    files.forEach((file) => {
        if (file.getLength() > MAX_IMAGE_SIZE) {
            throw new Error(
                `Attached file "${file.getName()}" is too large. Each file must be less than ${MAX_IMAGE_SIZE} bytes.`
            );
        }
        let contentType = file.getContentType();
        if (!ACCEPTED_PREFIXES.some((prefix) => contentType.startsWith(prefix))) {
            throw new Error(
                `Attached file "${file.getName()}" is neither an image, video, audio, PDF, nor text.`
            );
        }
        if (contentType.startsWith('text/')) {
            // normalize any text subtype to text/plain, keeping parameters such as charset
            contentType = contentType.replace(/^text\/[^;]+/, 'text/plain');
        }
        inlineImages.push({
            mimeType: contentType,
            data: base64.encodeToString(fileRepository.readFile(file)),
        });
    });
    return inlineImages;
};
// OAuth2 scope that the authorization setting must include for Vertex AI calls
const SCOPE = 'https://www.googleapis.com/auth/cloud-platform';
// Google's OAuth2 token endpoint, used both as the JWT audience and the POST target
const URL_TOKEN_REQUEST = 'https://oauth2.googleapis.com/token';
/**
 * Requests an OAuth2 access token with the JWT Bearer flow, using the
 * service account information stored in the HTTP authorization setting.
 * @param auth HTTP authorization setting (OAuth2 JWT Bearer)
 * @returns {any} the token endpoint's response object, containing access_token
 * @throws when a required setting is missing or the token request fails
 */
const getAccessToken = (auth) => {
    const privateKeyId = auth.getPrivateKeyId();
    const privateKey = auth.getPrivateKey();
    const serviceAccount = auth.getCustomSecret1();
    const scope = auth.getScope();
    if (scope === null || !scope.split(' ').includes(SCOPE)) {
        throw new Error(`Scope ${SCOPE} must be included in the scope.`);
    }
    if (privateKeyId === '') {
        throw new Error('Private Key ID is required.');
    }
    if (privateKey === '') {
        throw new Error('Private Key is required.');
    }
    if (serviceAccount === '') {
        throw new Error('Service Account must be set to Custom Secret 1.');
    }
    const header = {
        alg: 'RS256',
        typ: 'at+jwt',
        kid: privateKeyId,
    };
    const now = Math.floor(Date.now() / 1000);
    const payload = {
        iss: serviceAccount,
        aud: URL_TOKEN_REQUEST,
        sub: '',
        iat: now,
        /**
         * https://developers.google.com/identity/protocols/oauth2/service-account#jwt-auth
         * The 1-hour limit is documented for the "without OAuth" flow, but it
         * applies here as well: a longer lifetime is rejected as an error, and
         * a shorter one still yields a 1-hour token.
         */
        exp: now + 3600,
        scope,
    };
    const key = rsa.readKeyFromPkcs8(privateKey);
    const assertion = jwt.build(header, payload, key);
    const response = httpClient
        .begin()
        .formParam('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer')
        .formParam('assertion', assertion)
        .post(URL_TOKEN_REQUEST);
    const responseText = response.getResponseAsString();
    if (response.getStatusCode() !== 200) {
        engine.log(responseText);
        throw new Error(`Failed to get Access token. status: ${response.getStatusCode()}`);
    }
    // reuse the text already read above instead of reading the response body a second time
    const result = JSON.parse(responseText);
    if (result.access_token === undefined) {
        engine.log(responseText);
        throw new Error(`Failed to get Access token. access token not found.`);
    }
    return result;
};
/**
 * Invokes the Gemini model via the Vertex AI streamGenerateContent endpoint.
 * @param auth HTTP authorization setting (OAuth2 JWT Bearer)
 * @param region region code, or 'global'
 * @param projectId Google Cloud project ID
 * @param model model ID
 * @param maxTokens maximum number of output tokens, or null when not set
 * @param thinkingLevel thinking level, or '' when not set
 * @param temperature sampling temperature, or null when not set
 * @param stopSequences stop sequences (possibly an empty array)
 * @param message user message text
 * @param inlineImages inline data objects ({mimeType, data}) to attach
 * @param mediaResolution resolution for attached files, or '' when not set
 * @returns {Object} {answer: String, thought: String} — thought is set only
 *     when a non-empty thought summary was returned
 * @throws when the request fails or no response content is generated
 */
const invokeModel = (
auth,
region,
projectId,
model,
maxTokens,
thinkingLevel,
temperature,
stopSequences,
message,
inlineImages,
mediaResolution
) => {
// regional endpoints use a "<region>-" host prefix; the global endpoint does not
let domain = 'aiplatform.googleapis.com';
if (region !== 'global') {
domain = `${region}-${domain}`;
}
const URL = `https://${domain}/v1/projects/${projectId}/locations/${region}/publishers/google/models/${model}:streamGenerateContent`;
const generationConfig = {};
if (maxTokens !== null) {
generationConfig.maxOutputTokens = maxTokens;
}
if (temperature !== null) {
generationConfig.temperature = temperature;
}
generationConfig.stopSequences = stopSequences;
if (mediaResolution !== '') {
generationConfig.mediaResolution = mediaResolution;
}
const thinkingConfig = {};
// request thought summaries only when a data item is configured to store them (T1)
if (configs.getObject('conf_Thought1') !== null) {
thinkingConfig.includeThoughts = true;
}
if (thinkingLevel !== '') {
thinkingConfig.thinkingLevel = thinkingLevel;
}
if (Object.keys(thinkingConfig).length > 0) {
generationConfig.thinkingConfig = thinkingConfig;
}
// NOTE(review): contents and safetySettings are single objects here, while the
// Vertex AI REST spec defines them as arrays; the endpoint appears to accept
// the singular form — confirm against the API reference before changing
const payload = {
contents: {
role: 'user',
parts: [
{
text: message,
},
],
},
safetySettings: {
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold: 'BLOCK_LOW_AND_ABOVE',
},
generationConfig,
};
inlineImages.forEach((inlineImage) => {
payload.contents.parts.push({
inlineData: inlineImage,
});
});
const response = httpClient
.begin()
.oauth2JwtBearer(auth, () => getAccessToken(auth))
.body(JSON.stringify(payload), 'application/json')
.post(URL);
const status = response.getStatusCode();
const respTxt = response.getResponseAsString();
if (status !== 200) {
engine.log(respTxt);
throw new Error(`Failed to invoke model. status: ${status}`);
}
// the streaming endpoint returns a JSON array of response chunks
const json = JSON.parse(respTxt);
const answers = [];
const thoughts = [];
for (const { candidates, usageMetadata } of json) {
// collect the answer text and the thought summary
if (candidates[0].content === undefined) {
engine.log('No content in the candidate.');
} else {
for (const part of candidates[0].content.parts) {
if (part.text === undefined) {
continue;
} else if (part.thought) {
thoughts.push(part.text);
} else {
answers.push(part.text);
}
}
}
// log finishReason and usage metadata
const finishReason = candidates[0].finishReason;
if (finishReason !== undefined) {
engine.log(`Finish Reason: ${finishReason}`);
}
if (usageMetadata !== undefined) {
if (usageMetadata.promptTokenCount !== undefined) {
engine.log(`Prompt Token Count: ${usageMetadata.promptTokenCount}`);
}
if (usageMetadata.thoughtsTokenCount !== undefined) {
engine.log(`Thoughts Token Count: ${usageMetadata.thoughtsTokenCount}`);
}
if (usageMetadata.candidatesTokenCount !== undefined) {
engine.log(`Candidates Token Count: ${usageMetadata.candidatesTokenCount}`);
}
if (usageMetadata.totalTokenCount !== undefined) {
engine.log(`Total Token Count: ${usageMetadata.totalTokenCount}`);
}
}
}
if (answers.length === 0 || answers.join('') === '') {
throw new Error(`No response content generated.`);
}
const result = {
answer: answers.join('')
};
if (thoughts.length !== 0 && thoughts.join('') !== '') {
result.thought = thoughts.join('');
}
return result;
};
/**
 * Saves data to the data item selected in the specified config.
 * Does nothing when no data item is selected.
 * @param configName name of the config that holds the data definition
 * @param data value to save; undefined clears the data item (saved as null)
 */
const saveData = (configName, data) => {
    const def = configs.getObject(configName);
    if (def === null) {
        return;
    }
    if (data === undefined) {
        engine.setData(def, null);
        // bug fix: without this return, setData was immediately called again
        // with undefined, overwriting the null just stored
        return;
    }
    engine.setData(def, data);
};