Miro Goettler commited on
Commit
3cec7b2
1 Parent(s): cf7b765

Add utils module

Browse files
Files changed (1) hide show
  1. utils.py +45 -0
utils.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm_guard.input_scanners import PromptInjection
2
+ from llm_guard.input_scanners.prompt_injection import MatchType
3
+
4
+
5
+ def is_subsequence(main_string, sub_string):
6
+ """
7
+ Checks if sub_string is a subsequence of main_string.
8
+ A subsequence allows arbitrary characters in between the characters of sub_string in main_string.
9
+
10
+ Parameters:
11
+ main_string (str): The string in which to search.
12
+ sub_string (str): The string to search for.
13
+
14
+ Returns:
15
+ bool: True if sub_string is a subsequence of main_string, False otherwise.
16
+ """
17
+ main_string = main_string.lower()
18
+ sub_string = sub_string.lower()
19
+
20
+ main_len = len(main_string)
21
+ sub_len = len(sub_string)
22
+
23
+ if sub_len == 0:
24
+ return True
25
+ if main_len == 0:
26
+ return False
27
+
28
+ main_index = 0
29
+ sub_index = 0
30
+
31
+ while main_index < main_len and sub_index < sub_len:
32
+ if main_string[main_index] == sub_string[sub_index]:
33
+ sub_index += 1
34
+ main_index += 1
35
+
36
+ return sub_index == sub_len
37
+
38
+
39
+ def is_malicious(user_input: str):
40
+ """Function to check if the user input is malicious. Uses the LLM Guard prompt injection scanner.
41
+ Uses a special model `ProtectAI/deberta-v3-base-prompt-injection-v2` to scan the user input for malicious content.
42
+ """
43
+ scanner = PromptInjection(threshold=0.5, match_type=MatchType.FULL)
44
+ sanitized_prompt, is_valid, risk_score = scanner.scan(user_input)
45
+ return is_valid, risk_score