Skip to content

Commit 8e582f7

Browse files
committed
[TEST] Add test case for lxm service internal API
- Added positive and negative TCs for the lxm service internal API.

Signed-off-by: hyunil park <hyunil46.park@samsung.com>
1 parent 324bf4a commit 8e582f7

2 files changed

Lines changed: 324 additions & 0 deletions

File tree

tests/capi/meson.build

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,22 @@ if get_option('enable-ml-service')
7676
test('unittest_capi_service_training_offloading', unittest_capi_service_training_offloading, env: testenv, timeout: 100)
7777
endif
7878
endif
79+
80+
# LXM Service Tests
81+
# These tests require both ml-service and llamacpp to be enabled.
82+
llamacpp_dep = dependency('llama', required: false)
83+
if llamacpp_dep.found()
84+
# Note: The source file itself is also conditionally compiled with ENABLE_LLAMACPP.
85+
unittest_capi_lxm_service = executable('unittest_capi_lxm_service',
86+
'unittest_capi_lxm_service.cc',
87+
dependencies: service_unittest_deps,
88+
install: get_option('install-test'),
89+
install_dir: unittest_install_dir
90+
)
91+
test('unittest_capi_lxm_service', unittest_capi_lxm_service, env: testenv, timeout: 120) # Increased timeout for LLM response
92+
else
93+
message('LXM Service tests will be skipped because llama dependency was not found.')
94+
endif
7995
endif
8096

8197
if nnfw_dep.found()
Lines changed: 308 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,308 @@
1+
/* SPDX-License-Identifier: Apache-2.0 */
2+
/**
3+
* @file unittest_capi_lxm_service.cc
4+
* @date 26 JULY 2025
5+
* @brief Unit test for ml-lxm-service.
6+
* @see https://github.com/nnstreamer/api
7+
* @author Hyunil Park <hyunil46.park@samsung.com>
8+
* @bug No known bugs
9+
*/
10+
11+
#include <gtest/gtest.h>
12+
#include <glib.h>
13+
#include <ml-api-service-private.h>
14+
#include <ml-api-service.h>
15+
#include <ml-api-common.h>
16+
#include <string.h>
17+
#include "ml-lxm-service-internal.h"
18+
#include "unittest_util.h"
19+
20+
#if defined(ENABLE_LLAMACPP)
21+
22+
/**
23+
* @brief Internal function to get the model file path.
24+
*/
25+
static gchar *
26+
_get_model_path (const gchar *model_name)
27+
{
28+
const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
29+
30+
/* Supposed to run test in build directory. */
31+
if (root_path == NULL)
32+
root_path = "..";
33+
34+
gchar *model_file = g_build_filename (
35+
root_path, "tests", "test_models", "models", model_name, NULL);
36+
37+
return model_file;
38+
}
39+
40+
/**
41+
* @brief Macro to skip testcase if required files are not ready.
42+
*/
43+
#define skip_lxm_tc(tc_name) \
44+
do { \
45+
g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
46+
if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
47+
g_autofree gchar *msg = g_strdup_printf ( \
48+
"Skipping '%s' due to missing model file. " \
49+
"Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
50+
tc_name); \
51+
GTEST_SKIP () << msg; \
52+
} \
53+
} while (0)
54+
55+
/**
56+
* @brief Test data structure to pass to the callback.
57+
*/
58+
typedef struct {
59+
int token_count;
60+
GString *received_tokens;
61+
} lxm_test_data_s;
62+
63+
/**
64+
* @brief Callback function for LXM service token streaming.
65+
*/
66+
static void
67+
_lxm_token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
68+
{
69+
lxm_test_data_s *tdata = (lxm_test_data_s *) user_data;
70+
ml_tensors_data_h data = NULL;
71+
void *_raw = NULL;
72+
size_t _size = 0;
73+
int status;
74+
75+
switch (event) {
76+
case ML_SERVICE_EVENT_NEW_DATA:
77+
ASSERT_TRUE (event_data != NULL);
78+
79+
status = ml_information_get (event_data, "data", &data);
80+
EXPECT_EQ (status, ML_ERROR_NONE);
81+
if (status != ML_ERROR_NONE)
82+
return;
83+
84+
status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
85+
EXPECT_EQ (status, ML_ERROR_NONE);
86+
if (status != ML_ERROR_NONE)
87+
return;
88+
89+
if (tdata) {
90+
if (tdata->received_tokens) {
91+
g_string_append_len (tdata->received_tokens, (const char *) _raw, _size);
92+
}
93+
tdata->token_count++;
94+
}
95+
g_print ("%.*s", (int) _size, (char *) _raw); // Print received token
96+
break;
97+
default:
98+
// Handle unknown or unimplemented events if necessary
99+
g_printerr ("Received unhandled LXM service event: %d\n", event);
100+
break;
101+
}
102+
}
103+
104+
/**
105+
* @brief Internal function to run a full LXM session test.
106+
*/
107+
static void
108+
_run_lxm_session_test (const gchar *config_path, const gchar *input_text,
109+
ml_option_h options)
110+
{
111+
ml_lxm_session_h session = NULL;
112+
ml_lxm_prompt_h prompt = NULL;
113+
lxm_test_data_s tdata = { 0, NULL };
114+
int status;
115+
116+
tdata.received_tokens = g_string_new ("");
117+
118+
// 1. Create session
119+
status = ml_lxm_session_create (config_path, NULL, &session);
120+
ASSERT_EQ (status, ML_ERROR_NONE);
121+
ASSERT_TRUE (session != NULL);
122+
123+
// 2. Create prompt
124+
status = ml_lxm_prompt_create (&prompt);
125+
ASSERT_EQ (status, ML_ERROR_NONE);
126+
ASSERT_TRUE (prompt != NULL);
127+
128+
status = ml_lxm_prompt_append_text (prompt, input_text);
129+
ASSERT_EQ (status, ML_ERROR_NONE);
130+
131+
// 3. Generate response
132+
status = ml_lxm_session_respond (session, prompt, options, _lxm_token_cb, &tdata);
133+
ASSERT_EQ (status, ML_ERROR_NONE);
134+
135+
// Wait for the callback to receive data.
136+
// 10 seconds should be enough for a simple response.
137+
g_usleep (10000000U);
138+
139+
// 4. Verify results
140+
EXPECT_GT (tdata.token_count, 0);
141+
EXPECT_GT (tdata.received_tokens->len, 0U);
142+
143+
g_print ("\nReceived total tokens: %d\n", tdata.token_count);
144+
g_print ("Full received text: %s\n", tdata.received_tokens->str);
145+
146+
// 5. Cleanup
147+
status = ml_lxm_prompt_destroy (prompt);
148+
EXPECT_EQ (status, ML_ERROR_NONE);
149+
150+
status = ml_lxm_session_destroy (session);
151+
EXPECT_EQ (status, ML_ERROR_NONE);
152+
153+
if (tdata.received_tokens) {
154+
g_string_free (tdata.received_tokens, TRUE);
155+
}
156+
}
157+
158+
/**
159+
* @brief Test basic flow of LXM service.
160+
*/
161+
TEST (MLLxmService, basicFlow_p)
162+
{
163+
skip_lxm_tc ("basicFlow_p");
164+
165+
g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
166+
ASSERT_TRUE (config != NULL);
167+
168+
const gchar input_text[] = "Hello LXM, how are you?";
169+
ml_option_h options = NULL;
170+
int status;
171+
172+
// Create options
173+
status = ml_option_create (&options);
174+
ASSERT_EQ (status, ML_ERROR_NONE);
175+
ASSERT_TRUE (options != NULL);
176+
177+
// Set temperature option
178+
status = ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.8), g_free);
179+
ASSERT_EQ (status, ML_ERROR_NONE);
180+
181+
// Set max_tokens option
182+
status = ml_option_set (options, "max_tokens", g_strdup_printf ("%zu", (size_t)32), g_free);
183+
ASSERT_EQ (status, ML_ERROR_NONE);
184+
185+
_run_lxm_session_test (config, input_text, options);
186+
187+
// Cleanup options
188+
ml_option_destroy (options);
189+
}
190+
191+
192+
/**
193+
* @brief Test LXM service with invalid parameters.
194+
*/
195+
TEST (MLLxmService, invalidParams_n)
196+
{
197+
ml_lxm_session_h session = NULL;
198+
ml_lxm_prompt_h prompt = NULL;
199+
int status;
200+
ml_option_h options = NULL;
201+
g_autofree gchar *valid_config = get_config_path ("config_single_llamacpp.conf");
202+
203+
// Create options for testing
204+
status = ml_option_create (&options);
205+
ASSERT_EQ (status, ML_ERROR_NONE);
206+
ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.5), g_free);
207+
ml_option_set (options, "max_tokens", g_strdup_printf ("%zu", (size_t)10), g_free);
208+
209+
// ml_lxm_session_create
210+
status = ml_lxm_session_create (valid_config, NULL, NULL);
211+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
212+
status = ml_lxm_session_create (NULL, NULL, &session);
213+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
214+
215+
status = ml_lxm_session_create ("non_existent_config.conf", NULL, &session);
216+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
217+
218+
status = ml_lxm_session_create (valid_config, NULL, &session);
219+
if (status == ML_ERROR_NONE) {
220+
// ml_lxm_prompt_create
221+
status = ml_lxm_prompt_create (NULL);
222+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
223+
224+
status = ml_lxm_prompt_create (&prompt);
225+
ASSERT_EQ (status, ML_ERROR_NONE);
226+
227+
// ml_lxm_prompt_append_text
228+
status = ml_lxm_prompt_append_text (NULL, "text");
229+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
230+
status = ml_lxm_prompt_append_text (prompt, NULL);
231+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
232+
233+
// ml_lxm_prompt_append_instruction
234+
status = ml_lxm_prompt_append_instruction (NULL, "instruction");
235+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
236+
status = ml_lxm_prompt_append_instruction (prompt, NULL);
237+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
238+
239+
// ml_lxm_session_set_instructions
240+
status = ml_lxm_session_set_instructions (NULL, "new instructions");
241+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
242+
status = ml_lxm_session_set_instructions (session, NULL);
243+
EXPECT_EQ (status, ML_ERROR_NONE);
244+
status = ml_lxm_session_set_instructions (session, "new instructions");
245+
EXPECT_EQ (status, ML_ERROR_NONE);
246+
247+
248+
// ml_lxm_session_respond
249+
status = ml_lxm_session_respond (NULL, prompt, options, _lxm_token_cb, NULL);
250+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
251+
status = ml_lxm_session_respond (session, NULL, options, _lxm_token_cb, NULL);
252+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
253+
status = ml_lxm_session_respond (session, prompt, options, NULL, NULL);
254+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
255+
256+
// ml_lxm_prompt_destroy
257+
status = ml_lxm_prompt_destroy (NULL);
258+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
259+
status = ml_lxm_prompt_destroy (prompt);
260+
EXPECT_EQ (status, ML_ERROR_NONE);
261+
prompt = NULL;
262+
263+
// ml_lxm_session_destroy
264+
status = ml_lxm_session_destroy (NULL);
265+
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
266+
status = ml_lxm_session_destroy (session);
267+
EXPECT_EQ (status, ML_ERROR_NONE);
268+
session = NULL;
269+
} else {
270+
g_print ("Skipping part of invalidParams_n as session creation failed (possibly due to missing models/config).\n");
271+
}
272+
273+
// Cleanup options
274+
ml_option_destroy (options);
275+
}
276+
277+
/**
278+
* @brief Main function to run the test.
279+
*/
280+
int
281+
main (int argc, char **argv)
282+
{
283+
int result = -1;
284+
285+
try {
286+
testing::InitGoogleTest (&argc, argv);
287+
} catch (...) {
288+
g_warning ("catch 'testing::internal::<unnamed>::ClassUniqueToAlwaysTrue'");
289+
}
290+
291+
/* ignore tizen feature status while running the testcases */
292+
set_feature_state (ML_FEATURE, SUPPORTED);
293+
set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED);
294+
set_feature_state (ML_FEATURE_SERVICE, SUPPORTED);
295+
296+
try {
297+
result = RUN_ALL_TESTS ();
298+
} catch (...) {
299+
g_warning ("catch `testing::internal::GoogleTestFailureException`");
300+
}
301+
302+
set_feature_state (ML_FEATURE, NOT_CHECKED_YET);
303+
set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET);
304+
set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET);
305+
306+
return result;
307+
}
308+
#endif /* ENABLE_LLAMACPP */

0 commit comments

Comments
 (0)