
OpenAI ChatGPT: Chat
This item sends a message to OpenAI ChatGPT and stores the response in the specified data item.
Basic Configs
- Step Name
- Note
Configs for this Auto Step
- conf_Auth
- C1: Authorization Setting in which Project API Key is set *
- conf_OrganizationId
- C-deprecated: Organization ID (Default organization if blank)
- conf_Model
- C2: Model *
- conf_MaxTokens
- C3: Maximum number of tokens to consume (2048 if blank)
- conf_ReasoningEffort
- C4: Reasoning Effort (Only for o-series and gpt-5 or later models)
- conf_Verbosity
- C5: Verbosity (Only for gpt-5 or later models)
- conf_Temperature
- C6: Temperature (0.0 – 2.0) (1.0 if blank)
- conf_StopSequences
- C7: Stop Sequences (write one per line, up to four)
- conf_GPT_Role
- C8: Role for ChatGPT#{EL}
- conf_Message1
- U1: User Message *#{EL}
- conf_Images1
- I1: Images to attach to user message
- conf_Images1Detail
- I1-D: Fidelity of image understanding (auto if not selected)
- conf_Answer1
- A1: Data item to save response from ChatGPT *
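For reference, the configs above map onto the Chat Completions request body roughly as sketched below. This is abridged from the script in the Script section; n, safety_identifier, reasoning_effort, and verbosity are omitted for brevity, and the example values (model name, stop sequence, placeholder texts) are illustrative only.
const requestJson = {                       // built in createChat() below
    model: 'gpt-4o-mini',                   // C2 (example model name)
    max_completion_tokens: 2048,            // C3 (2048 if blank)
    temperature: 1.0,                       // C6 (1.0 if blank)
    stop: ['###'],                          // C7 (omitted when no stop sequences are set)
    messages: [
        { role: 'system', content: '...' }, // C8 (omitted when blank)
        {
            role: 'user',
            content: [
                { type: 'text', text: '...' },                                                          // U1
                { type: 'image_url', image_url: { url: 'data:image/png;base64,...', detail: 'auto' } }  // I1 / I1-D
            ]
        }
    ]
};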
Notes
- Project API Keys can be created here
- In [C2: Model], if you want to use a model that is not listed in the pull-down menu, select “Fixed Value” and enter the model name in the input field.
- Model names should be written as they appear in the OpenAI documentation
- See “Models” in the OpenAI Platform documentation
- Only models accessible through the Chat Completions endpoint can be used
- For the supported endpoints, refer to each model’s details in the OpenAI documentation above
- [I1: Images to attach to user message] is only available on certain models
- For more details, please refer to the OpenAI documentation
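On the response side, the script below stores the content of the first choice into the data item selected in [A1: Data item to save response from ChatGPT] and logs the token usage. A minimal sketch of that handling, assuming a normal 200 response:
// responseBody: the JSON string returned by the Chat Completions endpoint
const { choices, usage } = JSON.parse(responseBody);
const answer1 = choices[0].message.content;                  // this string is saved to the A1 data item
engine.log(`Prompt Tokens: ${usage.prompt_tokens}`);         // token consumption is logged for reference
engine.log(`Completion Tokens: ${usage.completion_tokens}`);
If no content comes back (for example, when the finish reason indicates the token limit was reached), the step throws an error instead of saving an empty value.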
Capture

See Also
Script
- An XML file that contains the code below is available to download
- chatgpt-chat-completion.xml (C) Questetra, Inc. (MIT License)
- If you are using Professional, you can modify the contents of this file and use it as your own add-on auto step
const MAX_TOKENS_DEFAULT = 2048;
const MAX_IMAGE_SIZE = 20971520; // ChatGPT limit: up to 20 MB per file
const AVAILABLE_IMAGE_TYPES = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
function main() {
    ////// == Config & Data Retrieving ==
    const auth = configs.getObject('conf_Auth'); /// REQUIRED
    const organizationId = configs.get('conf_OrganizationId');
    const model = configs.get('conf_Model');
    const maxTokens = retrieveMaxTokens();
    const reasoningEffort = retrieveSelectItem('conf_ReasoningEffort');
    const verbosity = retrieveSelectItem('conf_Verbosity');
    const temperature = retrieveTemperature();
    const stopSequences = retrieveStopSequences();
    // https://platform.openai.com/docs/guides/safety-best-practices
    // Sending end-user IDs in your requests can be a useful tool to help OpenAI monitor and detect abuse.
    const requestUser = `m${processInstance.getProcessModelInfoId().toString()}`;
    const gptRole = configs.get('conf_GPT_Role'); // NotRequired
    const message1 = configs.get('conf_Message1'); /// REQUIRED
    if (message1 === '') {
        throw new Error('User Message is empty.');
    }
    const imageUrls1 = retrieveImageUrls('conf_Images1');
    const imageDetail1 = retrieveSelectItem('conf_Images1Detail');

    ////// == Calculating ==
    const answer1 = createChat(auth, organizationId, model, maxTokens, reasoningEffort, verbosity, temperature, stopSequences, requestUser, gptRole, message1, imageUrls1, imageDetail1);

    ////// == Data Updating ==
    saveData('conf_Answer1', answer1 ?? '');
}
/**
 * Retrieves the value of a SELECT_ITEM config.
 * Returns undefined if it is empty.
 * @param configName
 * @returns {undefined|String}
 */
const retrieveSelectItem = (configName) => {
    const value = configs.get(configName);
    if (value === '') {
        return undefined;
    }
    return value;
};
/**
 * Reads the maximum number of tokens to be set as max_completion_tokens from the config.
 * (max_tokens is deprecated.)
 * Returns 2048 if not specified.
 * @returns {Number}
 */
const retrieveMaxTokens = () => {
    const maxTokens = configs.get('conf_MaxTokens');
    if (maxTokens === '') {
        return MAX_TOKENS_DEFAULT;
    }
    const regExp = new RegExp(/^[1-9][0-9]*$/);
    if (!regExp.test(maxTokens)) {
        throw new Error('Maximum number of tokens must be a positive integer.');
    }
    return parseInt(maxTokens, 10);
};
/**
 * Reads the temperature from the config.
 * Returns 1 if not specified.
 * @returns {Number}
 */
const retrieveTemperature = () => {
    const temperature = configs.get('conf_Temperature');
    if (temperature === '') {
        return 1;
    }
    const regExp = /^([0-1](\.\d+)?|2(\.0+)?)$/;
    if (!regExp.test(temperature)) {
        throw new Error('Temperature must be a number from 0 to 2.');
    }
    return parseFloat(temperature);
};
const MAX_STOP_SEQUENCE_NUM = 4;

/**
 * Reads the stop sequences from the config.
 * @returns {Array<String>}
 */
const retrieveStopSequences = () => {
    const stopSequencesStr = configs.get('conf_StopSequences');
    if (stopSequencesStr === '') {
        return [];
    }
    const stopSequences = stopSequencesStr.split('\n')
        .filter(s => s !== '');
    if (stopSequences.length > MAX_STOP_SEQUENCE_NUM) {
        throw new Error(`Too many stop sequences. The maximum number is ${MAX_STOP_SEQUENCE_NUM}.`);
    }
    return stopSequences;
};
/**
 * Reads the attached images from the config and returns them as an array of strings
 * to be set as image_url parameters.
 * @param configName
 * @returns {Array<String>} imageUrls
 */
const retrieveImageUrls = (configName) => {
    const imagesDef = configs.getObject(configName);
    if (imagesDef === null) {
        return [];
    }
    const images = engine.findData(imagesDef);
    if (images === null) {
        return [];
    }
    const imageUrls = [];
    images.forEach(image => {
        if (image.getLength() > MAX_IMAGE_SIZE) {
            throw new Error(`Attached image "${image.getName()}" is too large. Each file must be less than ${MAX_IMAGE_SIZE} bytes.`);
        }
        const contentType = image.getContentType();
        if (!AVAILABLE_IMAGE_TYPES.includes(contentType)) {
            throw new Error(`Content-Type of "${image.getName()}" is not supported. Supported types are: ${AVAILABLE_IMAGE_TYPES.join(', ')}.`);
        }
        imageUrls.push(`data:${contentType};base64,${base64.encodeToString(fileRepository.readFile(image))}`);
    });
    return imageUrls;
};
/**
 * Executes the chat request.
 * @param auth
 * @param organizationId
 * @param model
 * @param maxTokens
 * @param reasoningEffort
 * @param verbosity
 * @param temperature
 * @param stopSequences
 * @param requestUser
 * @param gptRole
 * @param message1
 * @param imageUrls1
 * @param imageDetail1
 * @returns {String} answer1
 */
const createChat = (auth, organizationId, model, maxTokens, reasoningEffort, verbosity, temperature, stopSequences, requestUser, gptRole, message1, imageUrls1, imageDetail1) => {
    //// OpenAI API > Documentation > API REFERENCE > CHAT
    //// https://platform.openai.com/docs/api-reference/chat
    /// prepare json
    const requestJson = {
        safety_identifier: requestUser,
        model,
        n: 1,
        max_completion_tokens: maxTokens,
        reasoning_effort: reasoningEffort,
        verbosity,
        temperature,
        messages: []
    };
    if (gptRole !== '') {
        requestJson.messages.push({
            role: 'system',
            content: gptRole
        });
    }
    if (stopSequences.length > 0) { // some models return an error if an empty array is set
        requestJson.stop = stopSequences;
    }
    const message1Content = [];
    message1Content.push({
        type: 'text',
        text: message1
    });
    imageUrls1.forEach(imageUrl => {
        const imageObj = {
            type: 'image_url',
            image_url: {
                url: imageUrl
            }
        };
        imageObj.image_url.detail = imageDetail1; // when undefined, the property is omitted by JSON.stringify
        message1Content.push(imageObj);
    });
    requestJson.messages.push({
        role: 'user',
        content: message1Content
    });
    let request = httpClient.begin().authSetting(auth)
        .body(JSON.stringify(requestJson), 'application/json');
    if (organizationId !== null && organizationId !== '') {
        request = request.header('OpenAI-Organization', organizationId);
    }
    const response = request.post('https://api.openai.com/v1/chat/completions');
    const responseCode = response.getStatusCode();
    const responseBody = response.getResponseAsString();
    if (responseCode !== 200) {
        engine.log(responseBody);
        throw new Error(`Failed to request. status: ${responseCode}`);
    }
    const {choices, usage} = JSON.parse(responseBody);
    const finishReason = choices[0].finish_reason;
    const answer1 = choices[0].message.content;
    engine.log(`Finish Reason: ${finishReason}`);
    engine.log(`Prompt Tokens: ${usage.prompt_tokens}`);
    engine.log(`Completion Tokens: ${usage.completion_tokens}`);
    engine.log(`Completion Tokens Details: ${JSON.stringify(usage.completion_tokens_details)}`);
    if (answer1 === undefined || answer1 === null || answer1 === '') {
        throw new Error(`No response content generated. Finish Reason: ${finishReason}`);
    }
    return answer1;
};
/**
 * Saves data to a data item.
 * @param configName
 * @param data
 */
const saveData = (configName, data) => {
    const def = configs.getObject(configName);
    if (def === null) {
        return;
    }
    engine.setData(def, data);
};