1
1
import * as gptscript from "../src/gptscript"
2
2
import {
3
3
ArgumentSchemaType ,
4
- Credential , CredentialType ,
4
+ CredentialType ,
5
5
getEnv ,
6
6
PropertyType ,
7
7
RunEventType ,
@@ -12,7 +12,7 @@ import {
12
12
import path from "path"
13
13
import { fileURLToPath } from "url"
14
14
import * as fs from "node:fs"
15
- import { randomBytes } from "node:crypto" ;
15
+ import { randomBytes } from "node:crypto"
16
16
17
17
let gFirst : gptscript . GPTScript
18
18
let g : gptscript . GPTScript
@@ -172,6 +172,17 @@ describe("gptscript module", () => {
172
172
const result = await ( await g . run ( testGptPath ) ) . text ( )
173
173
expect ( result ) . toBeDefined ( )
174
174
expect ( result ) . toContain ( "Calvin Coolidge" )
175
+
176
+ // Run it a second time and expect a cached result
177
+ const run = await g . run ( testGptPath )
178
+ const secondResult = await run . text ( )
179
+ expect ( secondResult ) . toBeDefined ( )
180
+ expect ( secondResult ) . toStrictEqual ( result )
181
+
182
+ // There should be one call frame, and it should be cached
183
+ for ( let c in run . calls ) {
184
+ expect ( run . calls [ c ] . chatResponseCached ) . toBeTruthy ( )
185
+ }
175
186
} )
176
187
177
188
test ( "should override credentials correctly" , async ( ) => {
@@ -192,6 +203,7 @@ describe("gptscript module", () => {
192
203
test ( "run executes and stream a file correctly" , async ( ) => {
193
204
let out = ""
194
205
let err = undefined
206
+ let [ promptTokens , completionTokens , totalTokens ] = [ 0 , 0 , 0 ]
195
207
const testGptPath = path . join ( __dirname , "fixtures" , "test.gpt" )
196
208
const opts = {
197
209
disableCache : true ,
@@ -204,8 +216,17 @@ describe("gptscript module", () => {
204
216
await run . text ( )
205
217
err = run . err
206
218
219
+ for ( let c in run . calls ) {
220
+ promptTokens += run . calls [ c ] . usage . promptTokens || 0
221
+ completionTokens += run . calls [ c ] . usage . completionTokens || 0
222
+ totalTokens += run . calls [ c ] . usage . totalTokens || 0
223
+ }
224
+
207
225
expect ( out ) . toContain ( "Calvin Coolidge" )
208
226
expect ( err ) . toEqual ( "" )
227
+ expect ( promptTokens ) . toBeGreaterThan ( 0 )
228
+ expect ( completionTokens ) . toBeGreaterThan ( 0 )
229
+ expect ( totalTokens ) . toBeGreaterThan ( 0 )
209
230
} )
210
231
211
232
test ( "run executes and streams a file with global tools correctly" , async ( ) => {
@@ -273,9 +294,17 @@ describe("gptscript module", () => {
273
294
instructions : "${question}"
274
295
}
275
296
276
- const response = await ( await g . evaluate ( [ t0 , t1 ] ) ) . text ( )
297
+ const run = await g . evaluate ( [ t0 , t1 ] )
298
+ const response = await run . text ( )
277
299
expect ( response ) . toBeDefined ( )
278
300
expect ( response ) . toContain ( "Calvin Coolidge" )
301
+
302
+ // In this case, we expect the total number of tool results to be 1
303
+ let toolResults = 0
304
+ for ( let c in run . calls ) {
305
+ toolResults += run . calls [ c ] . toolResults
306
+ }
307
+ expect ( toolResults ) . toStrictEqual ( 1 )
279
308
} , 30000 )
280
309
281
310
test ( "with sub tool" , async ( ) => {
0 commit comments