Azure OpenAI Service #Chat: Interact with Parameters
Communicates with the Azure OpenAI Service API (ChatGPT running on Microsoft infrastructure). Supports setting advanced parameters: sampling temperature, top-p (nucleus) sampling, presence penalty, frequency penalty, and logit bias.
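As a minimal sketch of where these parameters end up (all values and message texts here are illustrative, not defaults), the script below assembles a Chat Completions request body like this:

{
  "messages": [
    { "role": "system", "content": "You are a helpful assistant." },
    { "role": "user",   "content": "Summarize the attached report." }
  ],
  "temperature": 0.7,
  "top_p": 1,
  "presence_penalty": 0,
  "frequency_penalty": 0.5,
  "logit_bias": { "50256": -100 },
  "n": 1,
  "max_tokens": 2048,
  "stop": [ "." ]
}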
Configs for this Auto Step
- AuthzConfU1
- U1: HTTP_Authz Setting (Secret API Key as “Fixed Value”) *
- StrConfU2
- U2: Resource Name *#{EL}
- StrConfU3
- U3: Deployment ID *#{EL}
- StrConfU4
- U4: API Version (default “2023-05-15”)#{EL}
- StrConfA0
- A0: Responder Role (SYSTEM Role)#{EL}
- StrConfA1
- A1: Request Message PROMPT *#{EL}
- StrConfA2
- A2: Set Parameters (Temperature, Top_P, Presence-penalty, Frequency-penalty) in 4 lines, one value per line (see the example after this list)#{EL}
- StrConfA3
- A3: Set LogitBias (one TokenID and Bias value pair per line; see the example after this list)#{EL}
- StrConfA4
- A4: Number of Responses (default 1)#{EL}
- StrConfA5
- A5: Limit of Response Tokens (default 2048)#{EL}
- StrConfA6
- A6: Set Stop Words (e.g. “.”), one per line#{EL}
- StrConfB1
- B1: Set FieldNames that store COMPLETION, one per line (update)#{EL}
- SelectConfB2
- B2: STRING Data item to store Response JSON (update)
- SelectConfC1
- C1: NUMERIC Data item to store PROMPT Tokens (update)
- SelectConfC2
- C2: NUMERIC Data item to store COMPLETION Tokens (update)
- SelectConfC3
- C3: NUMERIC Data item to store Total Tokens (update)
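For illustration only (the values and field names below are hypothetical, not defaults), the multi-line configs above are parsed line by line by the script, as follows:

A2 (4 lines: Temperature, Top_P, Presence-penalty, Frequency-penalty)
  0.7
  1
  0
  0.5
  -> temperature: 0.7, top_p: 1, presence_penalty: 0, frequency_penalty: 0.5
A3 (one “TokenID Bias” pair per line)
  50256 -100
  -> logit_bias: { "50256": -100 }
A6 (one stop word per line; a line consisting of “- - -” is sent as a newline)
  .
  - - -
  -> stop: [ ".", "\n" ]
B1 (one field name per line; the i-th field receives choices[i].message.content)
  q_answer1
  q_answer2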
Script
// GraalJS Script (engine type: 3)
//////// START "main()" /////////////////////////////////////////////////////////////////
main();
function main(){
////// == Config Retrieving / 工程コンフィグの参照 ==
const strAuthzSetting = configs.get ( "AuthzConfU1" ); /// REQUIRED
engine.log( " AutomatedTask Config: Authz Setting: " + strAuthzSetting );
/*
const strModel = configs.get( "StrConfM" ) !== "" ? // NotRequired
configs.get( "StrConfM" ) : "gpt-4"; // (default)
engine.log( " AutomatedTask Config: OpenAI Model: " + strModel );
*/
const strResourceName = configs.get( "StrConfU2" );
const strDeployId = configs.get( "StrConfU3" );
const strAPIVersion = configs.get( "StrConfU4" ) !== "" ? // NotRequired
configs.get( "StrConfU4" ) : "2023-05-15"; // (default)
const strSystemRole = configs.get ( "StrConfA0" ); // NotRequired
/*
const strLogPro1 = configs.get ( "StrConfPro1" ); // NotRequired
const strLogCom1 = configs.get ( "StrConfCom1" ); // NotRequired
const strLogPro2 = configs.get ( "StrConfPro2" ); // NotRequired
const strLogCom2 = configs.get ( "StrConfCom2" ); // NotRequired
const strLogPro3 = configs.get ( "StrConfPro3" ); // NotRequired
const strLogCom3 = configs.get ( "StrConfCom3" ); // NotRequired
*/
const strPrompt = configs.get ( "StrConfA1" ); /// REQUIRED
if( strPrompt === "" ){
throw new Error( "\n AutomatedTask ConfigError:" +
" Config {A1:Prompt} MUST NOT be empty \n" );
}
const strParams = configs.get ( "StrConfA2" ); // NotRequired
const arrParams = strParams !== "" ? strParams.split("\n") : null;
const numTemperature = isNaN(parseFloat(arrParams?.[0])) ? 1 : parseFloat( arrParams[0] );
const numTopP = isNaN(parseFloat(arrParams?.[1])) ? 1 : parseFloat( arrParams[1] );
const numPresPenalty = isNaN(parseFloat(arrParams?.[2])) ? 0 : parseFloat( arrParams[2] );
const numFreqPenalty = isNaN(parseFloat(arrParams?.[3])) ? 0 : parseFloat( arrParams[3] );
// const jsonLogitBias = arrParams?.[4] ? JSON.stringify( arrParams[4] ) : null;
// Number(undefined) // NaN
// Number(null) // 0 ☆
// Number('100a') // NaN
// parseFloat(undefined) // NaN
// parseFloat(null) // NaN
const strBias = configs.get ( "StrConfA3" ); // NotRequired
const arrBias = strBias !== "" ? strBias.split("\n") : null;
const strChoises = configs.get ( "StrConfA4" ); // NotRequired
const numChoises = isNaN(parseInt(strChoises,10)) ? 1 : parseInt(strChoises,10);
const strLimit = configs.get ( "StrConfA5" ); // NotRequired
const numLimit = isNaN(parseInt(strLimit,10)) ? 2048 : parseInt(strLimit,10);
const strStops = configs.get ( "StrConfA6" ); // NotRequired
const arrStops = strStops !== "" ? strStops.split("\n") : null;
const strQfields = configs.get ( "StrConfB1" ); // NotRequired
const arrQfields = strQfields !== "" ? strQfields.split("\n") : null;
const strPocketResponseJson = configs.getObject( "SelectConfB2" ); // NotRequired
const numPocketPromptTokens = configs.getObject( "SelectConfC1" ); // NotRequired
const numPocketCompletionTokens = configs.getObject( "SelectConfC2" ); // NotRequired
const numPocketTotalTokens = configs.getObject( "SelectConfC3" ); // NotRequired
////// == Data Retrieving / ワークフローデータの参照 ==
// (Nothing. Retrieved via Expression Language in Config Retrieving)
////// == Calculating / 演算 ==
//// OpenAI API > Documentation > API REFERENCE > CHAT
//// https://platform.openai.com/docs/api-reference/chat
/// prepare json
let strJson = {};
// strJson.model = strModel;
strJson.messages = [];
if ( strSystemRole !=="" ) {
let objSystemRole = {};
objSystemRole.role = "system";
objSystemRole.content = strSystemRole;
strJson.messages.push ( objSystemRole );
}
/*
if ( strLogPro1 !=="" && strLogCom1 !=="" ) {
let objLogPro = {};
objLogPro.role = "user";
objLogPro.content = strLogPro1;
strJson.messages.push ( objLogPro );
let objLogCom = {};
objLogCom.role = "assistant";
objLogCom.content = strLogCom1;
strJson.messages.push ( objLogCom );
}
if ( strLogPro2 !=="" && strLogCom2 !=="" ) {
let objLogPro = {};
objLogPro.role = "user";
objLogPro.content = strLogPro2;
strJson.messages.push ( objLogPro );
let objLogCom = {};
objLogCom.role = "assistant";
objLogCom.content = strLogCom2;
strJson.messages.push ( objLogCom );
}
if ( strLogPro3 !=="" && strLogCom3 !=="" ) {
let objLogPro = {};
objLogPro.role = "user";
objLogPro.content = strLogPro3;
strJson.messages.push ( objLogPro );
let objLogCom = {};
objLogCom.role = "assistant";
objLogCom.content = strLogCom3;
strJson.messages.push ( objLogCom );
}
*/
let objNewMsg = {};
objNewMsg.role = "user";
objNewMsg.content = strPrompt;
strJson.messages.push ( objNewMsg );
if ( arrParams?.[0] !=="" ) {
strJson.temperature = numTemperature;
}
if ( arrParams?.[1] !=="" ) {
strJson.top_p = numTopP;
}
if ( arrParams?.[2] !=="" ) {
strJson.presence_penalty = numPresPenalty;
}
if ( arrParams?.[3] !=="" ) {
strJson.frequency_penalty = numFreqPenalty;
}
strJson.n = numChoises;
strJson.max_tokens = numLimit;
strJson.user = "m" + processInstance.getProcessModelInfoId().toString();
if ( arrStops !== null ){
strJson.stop = [];
for ( let i = 0; i < arrStops.length; i++ ){
if ( arrStops[i] === "- - -" ){
strJson.stop.push ( "\n" );
}else{
strJson.stop.push ( arrStops[i] );
}
}
}
if ( arrBias !== null ){
strJson.logit_bias = {};
for ( let i = 0; i < arrBias.length; i++ ){
let arrNumParts = arrBias[i].match( /-?\d+/g ); // numbers (including with minus signs)
if ( arrNumParts !== null && arrNumParts.length >= 2 ) { // skip blank or malformed lines
strJson.logit_bias[arrNumParts[0]] = Number(arrNumParts[1]);
}
}
}
//engine.log( JSON.stringify( strJson ) ); // debug
/// prepare request1
let request1Uri = "https://" + strResourceName + ".openai.azure.com/openai/deployments/" + strDeployId + "/chat/completions?api-version=" + strAPIVersion;
const apiKey = httpClient.getOAuth2Token(strAuthzSetting);
let request1 = httpClient.begin(); // HttpRequestWrapper
request1 = request1.header("api-key", apiKey);
request1 = request1.body( JSON.stringify( strJson ), "application/json" );
/// try request1
const response1 = request1.post( request1Uri ); // HttpResponseWrapper
engine.log( " AutomatedTask ApiRequest1 Start: " + request1Uri );
const response1Code = response1.getStatusCode() + ""; // JavaNum to string
const response1Body = response1.getResponseAsString();
engine.log( " AutomatedTask ApiResponse1 Status: " + response1Code );
if( response1Code !== "200"){
throw new Error( "\n AutomatedTask UnexpectedResponseError: " +
response1Code + "\n" + response1Body + "\n" );
}
/// parse response1
const response1Obj = JSON.parse( response1Body );
////// == Data Updating / ワークフローデータへの代入 ==
if( strPocketResponseJson !== null ){
engine.setData( strPocketResponseJson, response1Body );
}
if ( arrQfields !== null ) {
for ( let i = 0; i < response1Obj.choices.length; i++ ) {
if( engine.findDataDefinitionByVarName ( arrQfields?.[i] ) !== null ){
engine.setDataByVarName( arrQfields[i],
response1Obj.choices[i].message.content ?? ""
);
}
}
}
if( numPocketPromptTokens !== null ){
engine.setData( numPocketPromptTokens, new java.math.BigDecimal(
response1Obj.usage.prompt_tokens ?? 0
));
}
if( numPocketCompletionTokens !== null ){
engine.setData( numPocketCompletionTokens, new java.math.BigDecimal(
response1Obj.usage.completion_tokens ?? 0
));
}
if( numPocketTotalTokens !== null ){
engine.setData( numPocketTotalTokens, new java.math.BigDecimal(
response1Obj.usage.total_tokens ?? 0
));
}
// "??": Nullish coalescing operator (ES11)
// https://developer.mozilla.org/docs/Web/JavaScript/Reference/Operators/Nullish_coalescing
} //////// END "main()" /////////////////////////////////////////////////////////////////
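For reference, a trimmed sketch of the response JSON the script parses (structure per the Chat API reference linked above; values are made up):

{
  "choices": [
    { "index": 0, "message": { "role": "assistant", "content": "..." }, "finish_reason": "stop" }
  ],
  "usage": { "prompt_tokens": 56, "completion_tokens": 31, "total_tokens": 87 }
}

B2 stores this whole JSON string, each B1 field stores choices[i].message.content, and C1/C2/C3 store usage.prompt_tokens, usage.completion_tokens, and usage.total_tokens respectively.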
Download
- AzureOpenAIService-chat-interact-with-parameters.xml
- 2023-08-25 (C) Questetra, Inc. (MIT License)
(Installing Addon Auto-Steps is available only in Professional edition.)
Notes
- For more information on the Azure OpenAI Service, check this page. ChatGPT and other services provided by OpenAI are available on the Microsoft infrastructure.
- Uses ‘API Key Authentication’. (Works with Questetra BPM Suite Ver. 15.1 or later)
- Refer to the Notes and Appendix of OpenAI #Chat: Interact with Parameters, where the configuration items are almost identical.
- The model (AI engine) is specified when deploying the model on the Azure OpenAI Service side, so it is not a configuration item.
- Set the resource name and deployment ID determined when creating the resource and deploying the model on the Azure OpenAI Service side (see the example endpoint URL after these notes).
- The default API version is “2023-05-15”, the only stable version available as of 1 Sep 2023.
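For example, with a hypothetical resource name “example-resource” and deployment ID “example-gpt35”, the script posts to the endpoint built from U2, U3, and U4:

https://example-resource.openai.azure.com/openai/deployments/example-gpt35/chat/completions?api-version=2023-05-15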
Capture