Oak & Replit: [WIP] Replit-AI API Wrapper in Oak

I’ve recently started working on more Replit-based Oak-ware.
Stumbled upon Replit AI for TS.

Decided to search for the src, and here begins my recode:

// WIP - NOT FUNCTIONAL YET
// SPCFORK - OMG GUYS!!!
// 1:54 AM 23/09/08

std := import('std')
str := import('str')
json := import('json')
fmt := import('fmt')

// Models Available!
replModels := {
  Chat: atom('chat-bison')
}

ChatModel := [replModels.Chat]

baseUrl := env().MODEL_FARM_URL |> std.default('https://production-modelfarm.replit.com/')

isDeployment := if env().REPLIT_DEPLOYMENT {
  'true' -> true
  'false' -> false
}

////
// AI RES TYPES
////



fn RequestError() {
  {
    message: ''
    statusCode: 0
  }
}

// Message Interface
fn ChatMessage() {
  {
    content: ''
    author: ''
  }
}

// Message Opts Interface
fn ChatOptions() {
  {
    model: ChatModel
    context: ''
    examples: []
    messages: []
    temperature: 0
  }
}

////

fn genReplIdentityToken() {
  // exec() takes the binary path, an argument list, and stdin;
  // the token is printed on stdout
  _res? := exec(
    env().REPLIT_CLI,
    ['identity', 'create', '-audience=modelfarm@replit.com'],
    ''
  )

  _res?.stdout |> str.trim()
}

fn getDeploymentToken() {
  _res? := req({
    url: 'http://localhost:1105/getIdentityToken',
    body: json.serialize({ audience: 'modelfarm@replit.com' }),
    headers: {
      'Content-Type': 'application/json',
    },
    method: 'POST',
  })

  _res?.resp.body |> json.parse()
}

fn makeRequest(urlPath, body, processJSON) {
  urlPath := urlPath |> std.default('')
  body := body |> std.default('')
  // processJSON := processJSON |> std.default('')

  flag? := false

  response := req({
    url: baseUrl + urlPath,
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
      Authorization: 'Bearer {{ 0 }}' |> fmt.format(getToken())
    },
    body: json.serialize(body),
  })

  if response.resp.status != 200 {
    true -> {
      flag? <- result.Err({
        message: response.resp.body
        statusCode: response.resp.status
      })
    }
  }

  if response.resp.body {
    ? -> {
      flag? <- result.Err({
        message: 'No response body'
        statusCode: response.resp.status
      })
    }
  }

  // We won't pipe, it's not possible.

  if flag? {
    false -> result.Ok(response.resp.body)
    _ -> flag?
  }

}

fn chatStream(options) {
  options := options |> std.default(ChatOptions())

  with chatImpl(options) '/chat_streaming' 
}

fn chat(options) {
  options := options |> std.default(ChatOptions())

  _res := with chatImpl(options) '/chat'

  __failedToChat := false
  if _res {
    ? -> {
      __failedToChat <- true
    }
    _ -> {
      // TODO: unwrap the response
    }
  }

}

Cool! Would you mind providing API docs for your Replit-AI API Wrapper?


It’s not done yet, but I plan to post it on the forums when I finish, within about a month’s time.

(Maybe even this week, since I want to use this mostly for myself.)
I’ll be using repl.tv to stream it.


You can find the official Replit module here


Continuing the discussion from Oak & Replit: [WIP] Replit-AI API Wrapper in Oak:

// SPCFORK - OMG GUYS!!!
// 4:36 PM 23/09/09

std := import('std')
str := import('str')
json := import('json')
fmt := import('fmt')

// Models Available!
replModels := {
  Chat: atom('chat-bison')
}

ChatModel := [replModels.Chat]

baseUrl := env().MODEL_FARM_URL |> std.default('https://production-modelfarm.replit.com/')

isDeployment := if env().REPLIT_DEPLOYMENT {
  'true' -> true
  'false' -> false
}

cachedToken := ?

////
// AI RES TYPES
////

fn Result(value, error, errorExtras) {
  this := {}
  if error {
    ? -> {
      this.ok := true
      this.value := value
    }
    _ -> {
      this.ok := false
      this.error := error
      this.errorExtras := errorExtras
    }
  }

  this
}

fn Err(error, errorExtras) {
  Result(?, error, errorExtras)
}

fn Ok(value) {
  Result(value)
}

////

fn RequestError() {
  {
    message: ''
    statusCode: 0
  }
}

// Message Interface
fn ChatMessage() {
  {
    content: ''
    author: ''
  }
}

// Message Opts Interface
fn ChatOptions() {
  {
    model: ChatModel
    context: ''
    examples: []
    messages: []
    temperature: 0
  }
}

fn ChatMultipleChoicesOptions() {
  _temp := ChatOptions()

  _temp |> std.merge({
    choicesCount: 0
  })
}

////

fn genReplIdentityToken() {
  // exec() takes the binary path, an argument list, and stdin;
  // the token is printed on stdout
  _res? := exec(
    env().REPLIT_CLI,
    ['identity', 'create', '-audience=modelfarm@replit.com'],
    ''
  )

  _res?.stdout |> str.trim()
}

fn getDeploymentToken() {
  _res? := req({
    url: 'http://localhost:1105/getIdentityToken',
    body: json.serialize({ audience: 'modelfarm@replit.com' }),
    headers: {
      'Content-Type': 'application/json',
    },
    method: 'POST',
  })

  _resjson := _res?.resp.body |> json.parse()

  endresult := ?

  if type(_resjson) {
    :null -> {
      with std.println() 'Expected json to have identity token'
    }

    :object -> {
      if keys(_resjson) |> std.contains?('identityToken') {
        false, ? -> {
          with std.println() 'Expected json to have identity token'
        }

        true -> {
          if _resjson.identityToken |> type() {
            :string -> {
              endresult <- _resjson.identityToken
            }
            _ -> {
              with std.println() 'Expected identity token to be a string'
            }
          }
        }
      }
    }

    _ -> {
      with std.println() 'Expected json to be an object'
    }
  }

  // @Returns
  endresult
}

fn makeRequest(urlPath, body, processJSON) {
  urlPath := urlPath |> std.default('')
  body := body |> std.default('')
  // processJSON := processJSON |> std.default('')

  flag? := false

  response := req({
    url: baseUrl + urlPath,
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
      Authorization: 'Bearer {{ 0 }}' |> fmt.format(getToken())
    },
    body: json.serialize(body),
  })

  if response.resp.status != 200 {
    true -> {
      flag? <- Err({
        message: response.resp.body
        statusCode: response.resp.status
      })
    }
  }

  if response.resp.body {
    ? -> {
      flag? <- Err({
        message: 'No response body'
        statusCode: response.resp.status
      })
    }
  }

  // We won't pipe, it's not possible.

  if flag? {
    false -> Ok(response.resp.body)
    _ -> flag?
  }

}

fn chatStream(options) {
  options := options |> std.default(ChatOptions())

  with chatImpl(options) '/chat_streaming' 
}

fn chat(options) {
  options := options |> std.default(ChatOptions())

  _res := with chatImpl(options) '/chat'

  __failedToChat := false
  returns? := ?
  
  if _res.ok {
    false, ? -> {
      __failedToChat <- true
      returns? <- _res
    }

    _ -> {
      // TODO: unwrap choices from the response body
    }
  }

  returns?
}

// STILL NOT TRANSLATED
fn chatImpl(options, urlPath) {
  makeRequest(
    urlPath,
    {
      model: options.model
      parameters: {
        prompts: [
          {
            context: ''
            messages: options.messages
          }
        ]
        temperature: options.temperature,
        maxOutputTokens: options.maxOutputTokens
        candidateCount: if keys(options) |> std.contains?('choicesCount') {
          true -> options.choicesCount
          false -> ?
        }
      },
    },
    fn(res) {
      // Original TS, still to translate:
      //
      //   if (!json.responses[0]?.candidates[0]?.message) {
      //     throw new Error('Expected at least one message');
      //   }
      //
      //   return {
      //     choices: json.responses[0].candidates.map(({ message }) => ({
      //       message: {
      //         content: message.content,
      //         author: message.author,
      //       },
      //     })),
      //   };
    }
  )
}
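
Once chatImpl is wired up, I expect calling it to look roughly like this (a hypothetical sketch; it assumes chat() eventually returns the Result from makeRequest):

res := chat({
  model: replModels.Chat
  messages: [{ content: 'Say hello!', author: 'user' }]
  temperature: 0.5
})

if res.ok {
  true -> std.println(res.value)
  _ -> std.println('chat failed:', res.error)
}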

Continuing the discussion from Oak & Replit: [WIP] Replit-AI API Wrapper in Oak:

I have made waaay more progress,
and plan to utilize my Expressive module as well.

// SPCFORK - aLMOst DONE!!!
// 11:50 AM 23/09/12

std := import('std')
str := import('str')
json := import('json')
fmt := import('fmt')

// Models Available!
replModels := {
  Chat: atom('chat-bison')
}

ChatModel := [replModels.Chat]

baseUrl := env().MODEL_FARM_URL |> std.default('https://production-modelfarm.replit.com/')

isDeployment := if env().REPLIT_DEPLOYMENT {
  'true' -> true
  'false' -> false
}

cachedToken := ?

////
// AI RES TYPES
////

fn Result(value, error, errorExtras) {
  this := {}
  if error {
    ? -> {
      this.ok := true
      this.value := value
    }
    _ -> {
      this.ok := false
      this.error := error
      this.errorExtras := errorExtras
    }
  }

  this
}

fn Err(error, errorExtras) {
  Result(?, error, errorExtras)
}

fn Ok(value) {
  Result(value)
}
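
// Quick sanity note on the Result helpers above (mine, not in the original paste):
//   Ok(42)      -> { ok: true, value: 42 }
//   Err('oops') -> { ok: false, error: 'oops', errorExtras: ? }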

////

fn RequestError() {
  {
    message: ''
    statusCode: 0
  }
}

// Message Interface
fn ChatMessage() {
  {
    content: ''
    author: ''
  }
}

// Message Opts Interface
fn ChatOptions() {
  {
    model: ChatModel
    context: ''
    examples: []
    messages: []
    temperature: 0
  }
}

fn ChatMultipleChoicesOptions() {
  _temp := ChatOptions()

  _temp |> std.merge({
    choicesCount: 0
  })
}

////

fn genReplIdentityToken() {
  // exec() takes the binary path, an argument list, and stdin;
  // the token is printed on stdout
  _res? := exec(
    env().REPLIT_CLI,
    ['identity', 'create', '-audience=modelfarm@replit.com'],
    ''
  )

  _res?.stdout |> str.trim()
}

fn getDeploymentToken() {
  _res? := req({
    url: 'http://localhost:1105/getIdentityToken',
    body: json.serialize({ audience: 'modelfarm@replit.com' }),
    headers: {
      'Content-Type': 'application/json',
    },
    method: 'POST',
  })

  _resjson := _res?.resp.body |> json.parse()

  endresult := ?

  if type(_resjson) {
    :null -> {
      with std.println() 'Expected json to have identity token'
    }

    :object -> {
      if keys(_resjson) |> std.contains?('identityToken') {
        false, ? -> {
          with std.println() 'Expected json to have identity token'
        }

        true -> {
          if _resjson.identityToken |> type() {
            :string -> {
              endresult <- _resjson.identityToken
            }
            _ -> {
              with std.println() 'Expected identity token to be a string'
            }
          }
        }
      }
    }

    _ -> {
      with std.println() 'Expected json to be an object'
    }
  }

  // @Returns
  endresult
}
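
// makeRequest() below calls getToken(), which I haven't pasted yet.
// A minimal sketch of what I assume it looks like: pick the token source
// by environment, and cache it in cachedToken (declared up top).
fn getToken() {
  if cachedToken {
    ? -> {
      cachedToken <- if isDeployment {
        true -> getDeploymentToken()
        _ -> genReplIdentityToken()
      }
      cachedToken
    }
    _ -> cachedToken
  }
}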

fn makeRequest(urlPath, body, processJSON) {
  urlPath := urlPath |> std.default('')
  body := body |> std.default('')
  // processJSON := processJSON |> std.default('')

  flag? := false

  response := req({
    url: baseUrl + urlPath,
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
      Authorization: 'Bearer {{ 0 }}' |> fmt.format(getToken())
    },
    body: json.serialize(body),
  })

  if response.resp.status != 200 {
    true -> {
      flag? <- Err({
        message: response.resp.body
        statusCode: response.resp.status
      })
    }
  }

  if response.resp.body {
    ? -> {
      flag? <- Err({
        message: 'No response body'
        statusCode: response.resp.status
      })
    }
  }

  // We won't pipe, it's not possible.

  // on success, hand the parsed JSON to the processJSON callback
  if flag? {
    false -> Ok(processJSON(response.resp.body |> json.parse()))
    _ -> flag?
  }

}

fn chatStream(options) {
  options := options |> std.default(ChatOptions())

  with chatImpl(options) '/chat_streaming' 
}

fn chat(options) {
  options := options |> std.default(ChatOptions())

  _res := with chatImpl(options) '/chat'

  __failedToChat := false
  returns? := ?
  
  if _res.ok {
    false, ? -> {
      __failedToChat <- true
      returns? <- _res
    }

    _ -> {
      returns? <- _res

      if len(_res.value) > 1 {
        true -> {
          // TODO: handle multiple choices
        }

        _ -> {
          // TODO: unwrap the single choice
        }
      }
    }
  }

  returns?

}

fn chatImpl(options, urlPath) {
  makeRequest(
    urlPath,
    {
      model: options.model
      parameters: {
        prompts: [
          {
            context: ''
            messages: options.messages
          }
        ]
        temperature: options.temperature,
        maxOutputTokens: options.maxOutputTokens
        candidateCount: if keys(options) |> std.contains?('choicesCount') {
          true -> options.choicesCount
          false -> ?
        }
      },
    },
    fn(res) {
      _filler := ?

      if res.responses.(0).candidates.(0).message {
        ? -> with std.println() 'Expected at least one message'

        _ -> {
          // tmyk: json.responses -> GET 0 -> candidates -> cleansed ...message
          _filler <- with std.map(res.responses.(0).candidates) fn(candidate, i) {
            {
              message: {
                content: candidate.message.content
                author: candidate.message.author
              }
            }
          }
        }

        // TODO: Implement instance-check module for interface bases.
        // We would use the ChatMessage interface.
      }

      { choices: _filler }
    }

  )

}

I’ve decided to keep all my files, restart, and paste where needed.
This is to avoid running into threading problems.

The JS/TS version is structured around Promises,
whereas Oak has no threading and can’t do async.
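
(For example, where the TS wrapper would await a Promise from chat(), the Oak port just blocks on req() and returns the value directly; my own illustration:)

// TS:  const res = await chat({ messages })   // Promise-based
// Oak: res := chat({ messages: [...] })       // blocks until req() finishes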

I’m using the Python version as a reference since it is apparently functional.

Here’s the start of the src/replit/ai/modelfarm/google/language_models/text_embedding_model file in Oak! :deciduous_tree:

std := import('std')

EmbMdl := import('../../EmbeddingModel')

fn TextEmbeddingStatistics(token_count, truncated) {
  {
    token_count: token_count |> std.default(0)
    truncated: truncated |> std.default(false)
  }
}

fn TextEmbedding(statistics, values) {
  {
    statistics: statistics |> std.default(TextEmbeddingStatistics())
    values: values |> std.default([])
  }
}

fn TextEmbeddingModel(model_id) {
  self := {}

  self._init := fn _init(model_id) {
    self.underlying_model := EmbMdl.EmbeddingModel(model_id)
  }

  self.from_pretrained := fn from_pretrained(model_id) {
    TextEmbeddingModel(model_id)
  }

  // > "this model only takes in the content parameter and nothing else"
  self.get_embeddings := fn get_embeddings(content) {
    request := self.__ready_input(content)
    // > "since this model only takes the content param, we don't pass kwargs"
    response := self.underlying_model.embed(request)

    self.__ready_response(response)
  }

  self.__ready_input := fn __ready_input(content) {
    // equivalent of Python's [{'content': x} for x in content]

    _rebuild := []
    with std.each(content) fn(item, i) {
      _rebuild << { content: item }
    }

    _rebuild
  }

  self.__ready_response := fn __ready_response(response) {

    fn transform_response(x) {
      metadata := x.tokenCountMetadata
      tokenCount := metadata.unbilledTokens + metadata.billableTokens
      stats := TextEmbeddingStatistics(tokenCount, x.truncated)
      TextEmbedding(stats, x.values)
    }

    _rebuild := []
    with std.each(response.embeddings) fn(item, i) {
      _rebuild << transform_response(item)
    }

    _rebuild
  }

  self._init(model_id)

  self
}
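
If the EmbeddingModel port cooperates, usage should come out roughly like this (a hypothetical sketch; 'textembedding-gecko' is my guess at a model id):

model := TextEmbeddingModel('textembedding-gecko')
embeddings := model.get_embeddings(['Hello, Model Farm!'])
std.println(embeddings.(0).values)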

The text_generation_model file has been recoded!

:deciduous_tree:

std := import('std')
completion_model := import('../../completion_model')

{
  ready_parameters: ready_parameters
} := import('../utils')

{
  TextGenerationResponse: TextGenerationResponse
} := import('../structs')


fn TextGenerationModel(model_id) {
  self := {}

  // Class representing a Google completion model.
  
  // Methods:
  //    from_pretrained - Loads a pretrained model using its identifier
  //    predict - completes a human-like text given an initial prompt.
  //    async_predict - Async version of the predict method.

  self._init := fn _init(model_id) {
    self.underlying_model := completion_model.CompletionModel(model_id)
  }

  self.from_pretrained := fn from_pretrained(model_id) {

    // Creates a Tokenizer from a pretrained model.

    // Args:
    //   model_id (str): The identifier of the pretrained model.

    // Returns:
    //    The TextGenerationModel class instance.
    
    TextGenerationModel(model_id)
  }

  self.predict := fn predict(prompt, kwargs) {
    
    // completes a human-like text given an initial prompt.

    // Args:
    //   prompt (str): The initial text to start the generation.
    
    // > kwargs (dict): the Python **kwargs, passed as a plain dict.

    // Returns:
    //   TextGenerationResponse: The model's response containing the completed text.

    parameters := ready_parameters(kwargs)

    response := self.underlying_model.complete([prompt], parameters)

    self._ready_response(response)
  }

  // =====================
  // == ** IMPORTANT ** ==
  // =====================
  // I cannot rewrite the 
  // Py predict_streaming
  // fn, due to no yield.
  // =====================

  self._ready_response := fn _ready_response(response) {
    // Transforms Completion Model's response into a readily usable format.

    // Args:
    //   response (CompletionModelResponse): The original response from the underlying model.

    // Returns:
    //   TextGenerationResponse: The transformed response.

    choice := response.responses.(0).choices.(0)
    safetyAttributes := choice.metadata.safetyAttributes

    safetyCategories := []

    with std.each(safetyAttributes.categories) fn (item, index) {
      safetyCategories << [
        item
        safetyAttributes.scores.(index)
      ]
    }

    TextGenerationResponse({
      is_blocked: safetyAttributes.blocked
      // Python's choice.model_dump() isn't needed; choice is already a plain object here
      raw_prediction_response: choice
      safety_attributes: safetyCategories
      text: choice.content
    })
  }

  self._init(model_id)

  self
}
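
And a rough sketch of how I expect to drive it (hypothetical; 'text-bison' is my guess at a model id, and the kwargs object mirrors the Python signature):

model := TextGenerationModel('text-bison')
response := model.predict('Write a haiku about oak trees.', { temperature: 0.2 })
std.println(response.text)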