Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-02 11:50:37 +08:00
test/wonjongin-text_get_max_tokens: generate test code
- There was no test code for get_max_tokens in utils, so unit tests covering each branch of the function have been added.
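For context, the branching behavior that these tests pin down can be sketched roughly as below. This is inferred from the test cases themselves, not copied from the pr_agent.algo.utils.get_max_tokens implementation; the import path for get_settings and the exception message are assumptions.

from pr_agent.algo.utils import MAX_TOKENS
from pr_agent.config_loader import get_settings  # assumed import path


def get_max_tokens_sketch(model: str) -> int:
    # Rough sketch of the behavior the new tests exercise (not the real implementation)
    settings = get_settings()
    if model in MAX_TOKENS:
        # Branch 1: the model is registered in the MAX_TOKENS table
        max_tokens_model = MAX_TOKENS[model]
    elif settings.config.custom_model_max_tokens > 0:
        # Branch 2: unregistered model, but a custom token limit is configured
        max_tokens_model = settings.config.custom_model_max_tokens
    else:
        # Branch 3: unknown model and no custom limit -> error
        raise Exception(f"Model {model} is not in MAX_TOKENS and custom_model_max_tokens is not set")  # assumed message
    if settings.config.max_model_tokens > 0:
        # Branch 4: a global max_model_tokens cap further limits the result
        max_tokens_model = min(max_tokens_model, settings.config.max_model_tokens)
    return max_tokens_model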
This commit is contained in:

67  tests/unittest/test_get_max_tokens.py  (new file)
@@ -0,0 +1,67 @@
import pytest

from pr_agent.algo.utils import get_max_tokens, MAX_TOKENS
import pr_agent.algo.utils as utils


class TestGetMaxTokens:

    # Test a model that is registered in MAX_TOKENS
    def test_model_max_tokens(self, monkeypatch):
        fake_settings = type('', (), {
            'config': type('', (), {
                'custom_model_max_tokens': 0,
                'max_model_tokens': 0
            })()
        })()

        monkeypatch.setattr(utils, "get_settings", lambda: fake_settings)

        model = "gpt-3.5-turbo"
        expected = MAX_TOKENS[model]

        assert get_max_tokens(model) == expected

    # Test a model that is not registered but has a custom token limit configured
    def test_model_has_custom(self, monkeypatch):
        fake_settings = type('', (), {
            'config': type('', (), {
                'custom_model_max_tokens': 5000,
                'max_model_tokens': 0  # no global limit
            })()
        })()

        monkeypatch.setattr(utils, "get_settings", lambda: fake_settings)

        model = "custom-model"
        expected = 5000

        assert get_max_tokens(model) == expected

    # Test a model that is neither registered in MAX_TOKENS nor has a custom limit
    def test_model_not_max_tokens_and_not_has_custom(self, monkeypatch):
        fake_settings = type('', (), {
            'config': type('', (), {
                'custom_model_max_tokens': 0,
                'max_model_tokens': 0
            })()
        })()

        monkeypatch.setattr(utils, "get_settings", lambda: fake_settings)

        model = "custom-model"

        with pytest.raises(Exception):
            get_max_tokens(model)

    # Test that max_model_tokens caps the value taken from MAX_TOKENS
    def test_model_max_tokens_with_limit(self, monkeypatch):
        fake_settings = type('', (), {
            'config': type('', (), {
                'custom_model_max_tokens': 0,
                'max_model_tokens': 10000
            })()
        })()

        monkeypatch.setattr(utils, "get_settings", lambda: fake_settings)

        model = "gpt-3.5-turbo"  # MAX_TOKENS for this model exceeds the 10000 cap
        expected = 10000

        assert get_max_tokens(model) == expected
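The new tests can be run on their own with pytest, e.g. pytest tests/unittest/test_get_max_tokens.py. Because monkeypatch swaps out utils.get_settings for a fake settings object, no real pr-agent configuration is needed to run them.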