First test
epugh committed Jan 14, 2025
1 parent 6db98e1 commit b6045ef
Showing 2 changed files with 80 additions and 0 deletions.
47 changes: 47 additions & 0 deletions app/services/llm_service.rb
@@ -1,14 +1,61 @@
# frozen_string_literal: true

require 'net/http'
require 'json'

class LlmService
  def initialize openai_key, _opts = {}
    @openai_key = openai_key
  end

  def make_judgement _system_prompt, _user_prompt
    # scott write code.

    {
      explanation: 'Hi scott',
      rating: rand(4),
    }
  end

  # rubocop:disable Metrics/MethodLength
  def get_llm_response user_prompt, system_prompt
    uri = URI('https://api.openai.com/v1/chat/completions')
    headers = {
      'Content-Type' => 'application/json',
      'Authorization' => "Bearer #{@openai_key}",
    }
    body = {
      model: 'gpt-4',
      messages: [
        { role: 'system', content: system_prompt },
        { role: 'user', content: user_prompt }
      ],
    }
    response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
      request = Net::HTTP::Post.new(uri, headers)
      request.body = body.to_json
      http.request(request)
    end
    if response.is_a?(Net::HTTPSuccess)
      json_response = JSON.parse(response.body)
      # puts json_response
      content = json_response['choices']&.first&.dig('message', 'content')
      # puts content
      parsed_content = begin
        JSON.parse(content)
      rescue StandardError
        {}
      end

      # puts "here is parsed"
      # puts parsed_content
      {
        explanation: parsed_content['response']['explanation'],
        judgment: parsed_content['response']['judgment_value'],
      }
    else
      raise "Error: #{response.code} - #{response.message}"
    end
  end
  # rubocop:enable Metrics/MethodLength
end
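
For orientation, a minimal sketch of how the new service might be exercised, assuming a user record that exposes an openai_key (the prompts simply mirror the ones used in the test below):

# Illustrative sketch only: `user` is assumed to respond to #openai_key.
service = LlmService.new(user.openai_key)
result = service.get_llm_response(
  'Explain why you chose a judgment of 3.',
  'Provide a JSON response with an explanation and a judgment value.'
)
result[:judgment]    # the judgment_value pulled out of the model's JSON reply
result[:explanation] # the accompanying explanation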
33 changes: 33 additions & 0 deletions test/services/llm_service_test.rb
@@ -0,0 +1,33 @@
# frozen_string_literal: true

require 'test_helper'

class LlmServiceTest < ActiveSupport::TestCase
  let(:user) { users(:judge_judy) }
  let(:service) { LlmService.new user.openai_key, {} }

  let(:score_data) do
    {
      all_rated: [ true, false ].sample,
      queries: {},
      score: (1..100).to_a.sample,
      try_number: the_try.try_number,
      user_id: user.id,
    }
  end

  describe 'Hacking with Scott' do
    test 'can we make it run' do
      WebMock.allow_net_connect!
      user_prompt = 'Explain why you chose a judgment of 3.'
      system_prompt = 'Provide a JSON response with an explanation and a judgment value.'
      result = service.get_llm_response(user_prompt, system_prompt)
      puts result

      assert_equal 3, result[:judgment]

      WebMock.disable_net_connect!
    end
  end
end
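
Because the test currently allows real network traffic, one alternative worth noting is stubbing the OpenAI endpoint with WebMock instead of hitting the live API. A sketch, assuming the JSON shape that get_llm_response expects (choices[0].message.content holding a JSON string with a `response` object):

# Sketch: stub the chat completions endpoint so the test needs no live key.
stub_request(:post, 'https://api.openai.com/v1/chat/completions')
  .to_return(
    status: 200,
    headers: { 'Content-Type' => 'application/json' },
    body: {
      choices: [
        { message: { content: { response: { explanation: 'Why 3 fits.', judgment_value: 3 } }.to_json } }
      ],
    }.to_json
  )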
