Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -5,13 +5,12 @@ import streamlit as st
|
|
5 |
from streamlit_chat import message
|
6 |
|
7 |
# some helper vars and functions
|
8 |
-
thispath = os.path.realpath(os.path.dirname(__file__))
|
9 |
if 'displayChat' not in st.session_state:
|
10 |
with st.spinner('Setting up plugins...'):
|
11 |
os.system('mkdir files')
|
12 |
thishour = str(datetime.datetime.now().hour)
|
13 |
-
if os.path.isfile(os.
|
14 |
-
with open(os.
|
15 |
stampHour = f.readline().strip('\n')
|
16 |
if thishour != stampHour:
|
17 |
os.system('rm -rf files/*')
|
@@ -27,32 +26,65 @@ if 'displayChat' not in st.session_state:
|
|
27 |
blipPath = os.path.join(thispath, 'plugins', 'blip2.py')
|
28 |
with open(blipPath, 'wb') as f:
|
29 |
f.write(blipS)
|
30 |
-
|
31 |
-
|
32 |
-
st.session_state['
|
33 |
-
|
34 |
-
st.session_state['
|
35 |
-
|
36 |
-
st.session_state['
|
37 |
-
|
38 |
-
st.session_state['
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
|
|
|
|
47 |
|
|
|
48 |
def formatTable(table):
    """Render the editable command table as 'COMMAND - explanation' lines.

    table: mapping (dict or DataFrame) with parallel 'GPT Commands' and
    'GPT Explanations' columns. Returns one newline-terminated line per
    command, suitable for interpolation into the GPT system prompt.
    """
    # zip the two columns directly instead of enumerate + repeated positional
    # indexing (the loop variable was unused); ''.join avoids quadratic
    # string concatenation.
    return ''.join('{} - {}\n'.format(cmd, expl)
                   for cmd, expl in zip(table['GPT Commands'], table['GPT Explanations']))
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
# WEB APP
|
55 |
-
st.markdown('# GPT-4
|
56 |
st.markdown('Made by [d3nt](https://github.com/d3n7) to give GPT-4 access to any commands/scripts you want via the command line. This unlocks the ability for GPT-4 to interact with the internet, APIs, and any applications that you could with a CLI. Basically it\'s open source, flexible, plugins for GPT-4.')
|
57 |
|
58 |
#User inputs
|
@@ -60,6 +92,7 @@ st.markdown('### OpenAI Settings')
|
|
60 |
openaikey = st.text_input('OpenAI API Key', type='password')
|
61 |
replicatekey = st.text_input('(OPTIONAL) Replicate API Key', type='password')
|
62 |
modelV = st.selectbox('Model', ('GPT-4', 'GPT-3.5-Turbo'))
|
|
|
63 |
st.markdown('### Editable Knowledge Base\nDelete any commands will not need to save tokens and increase accuracy.\n\nBe careful with the Raw Translation column. This is code that gets executed by your machine.')
|
64 |
d = {'GPT Commands': ['GOOGLE("question")', 'PYTHON(script.py)', 'MAKEFILE("content\\nhere", filename.txt)', 'READFILE(filename.txt)', 'LISTFILES()', 'BLIP("What\'s in this image?", img1.jpg)'],
|
65 |
'GPT Explanations': ['Search Google with the given text and return the results', 'Run a python script with the given file name. Do not use quotes for the filename argument.', 'Make a file with the given content and file name.', 'Read the content of a given filename', 'List the files you have access to', 'Ask BLIP-2, a vision model, a given question about a given image'],
|
@@ -67,7 +100,8 @@ d = {'GPT Commands': ['GOOGLE("question")', 'PYTHON(script.py)', 'MAKEFILE("cont
|
|
67 |
}
|
68 |
df = pd.DataFrame(data=d, dtype='string')
|
69 |
commandTable = st.experimental_data_editor(df, use_container_width=True, num_rows='dynamic')
|
70 |
-
|
|
|
71 |
uploadFile = st.file_uploader('')
|
72 |
cola, colb, colc = st.columns(3)
|
73 |
with cola:
|
@@ -79,16 +113,18 @@ with cola:
|
|
79 |
f.write(uploadFile.getbuffer())
|
80 |
st.write('Success')
|
81 |
with colb:
|
82 |
-
if st.button('
|
83 |
shutil.make_archive(os.path.join(thispath, 'files'), 'zip', os.path.join(thispath, 'files'))
|
84 |
-
st.session_state['
|
85 |
with colc:
|
86 |
-
if st.session_state['
|
87 |
with open(os.path.join(thispath, 'files.zip'), 'rb') as f:
|
88 |
st.download_button('Download Zip', f, file_name='file.zip', key='filezip')
|
|
|
89 |
st.markdown('### Chat')
|
90 |
prompt = st.text_input('Message')
|
91 |
col1, col2, col3, col4 = st.columns(4)
|
|
|
92 |
with col1:
|
93 |
if st.button('Send'):
|
94 |
st.session_state['running'] = True
|
@@ -99,91 +135,83 @@ with col3:
|
|
99 |
with col4:
|
100 |
manualApproval = st.checkbox('Require Manual Approval', True)
|
101 |
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
r = openai.ChatCompletion.create(model=modelV.lower(), messages=st.session_state['history'])
|
106 |
-
resp = r['choices'][0]['message']['content']
|
107 |
-
st.session_state['history'].append({'role': 'assistant', 'content': resp})
|
108 |
-
return resp
|
109 |
|
110 |
-
|
111 |
-
if
|
112 |
-
|
113 |
-
try:
|
114 |
-
p = subprocess.Popen(st.session_state['command'], shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
|
115 |
-
p.wait()
|
116 |
-
st.session_state['followupPrompt'] = 'Response: ' + p.communicate()[0].decode("utf-8")
|
117 |
-
except subprocess.CalledProcessError as e:
|
118 |
-
st.session_state['followupPrompt'] = 'Response: ' + e.output.decode("utf-8")
|
119 |
-
elif st.session_state['command'].startswith('rm '):
|
120 |
-
st.session_state['followupPrompt'] = "Response: Rm command disabled on HuggingFace"
|
121 |
-
else:
|
122 |
-
st.session_state['followupPrompt'] = "Response: User rejected this command"
|
123 |
-
st.session_state['followup'], st.session_state['running'] = True, True
|
124 |
-
st.experimental_rerun()
|
125 |
|
126 |
-
if st.session_state['running']:
|
127 |
-
st.session_state['running'] = False
|
128 |
os.environ['REPLICATE_API_TOKEN'] = replicatekey
|
129 |
-
if
|
|
|
130 |
if (newSession or st.session_state['history'] == []) and (not st.session_state['followup']):
|
131 |
st.session_state['history'] = [{'role': 'system', 'content': sysPrompt.format(formatTable(commandTable))}]
|
|
|
|
|
|
|
132 |
|
|
|
133 |
if not st.session_state['displayChat']:
|
134 |
st.session_state['displayChat'] = True
|
|
|
|
|
135 |
|
|
|
136 |
openai.api_key = openaikey
|
137 |
-
|
138 |
-
response = askGPT(st.session_state['followupPrompt'])
|
139 |
-
st.session_state['followup'] = False #completed, so reset this flag
|
140 |
-
else:
|
141 |
-
response = askGPT(prompt)
|
142 |
|
143 |
-
#parse GPT commands,
|
144 |
if len(regex.findall(regx[0], response)) >= 1:
|
145 |
cmd = regex.findall(regx[0], response)[0][0]
|
146 |
stem = ''
|
147 |
rawArgs = ''
|
148 |
cmdId = -1
|
|
|
|
|
149 |
for x, i in enumerate(cmd):
|
150 |
if i == '(':
|
151 |
stem = cmd[:x]
|
152 |
rawArgs = cmd[x+1:][:-1]
|
153 |
break
|
154 |
-
|
155 |
-
|
156 |
for x, i in enumerate(commandTable['GPT Commands']):
|
157 |
if stem in i:
|
158 |
cmdId = x
|
159 |
break
|
160 |
|
|
|
|
|
|
|
161 |
if cmdId == -1:
|
162 |
-
st.session_state['
|
163 |
-
|
164 |
-
st.experimental_rerun()
|
165 |
elif "'''" in rawArgs:
|
166 |
-
st.session_state['
|
167 |
-
|
168 |
-
st.experimental_rerun()
|
169 |
elif '"""' in rawArgs:
|
170 |
-
st.session_state['
|
171 |
-
|
172 |
-
st.experimental_rerun()
|
173 |
else:
|
|
|
174 |
st.session_state['command'] = commandTable['Raw Translation'][cmdId]
|
175 |
args = []
|
176 |
if rawArgs != '':
|
177 |
args = re.findall(regx[1], rawArgs)
|
178 |
st.session_state['command'] = st.session_state['command'].format(*args)
|
|
|
|
|
179 |
singleQuotes = False
|
180 |
for i in args:
|
181 |
if i.startswith("'"):
|
182 |
singleQuotes = True
|
183 |
-
st.session_state['
|
184 |
-
|
185 |
-
st.experimental_rerun()
|
186 |
break
|
|
|
|
|
187 |
if not singleQuotes:
|
188 |
if manualApproval:
|
189 |
st.session_state['acceptreject'] = True
|
@@ -191,14 +219,14 @@ if st.session_state['running']:
|
|
191 |
runCmd(1)
|
192 |
|
193 |
else:
|
194 |
-
st.warning('Make sure OpenAI key
|
195 |
|
|
|
|
|
196 |
if st.session_state['acceptreject']:
|
197 |
-
|
198 |
-
st.warning('GPT is trying to run the following command: ' + st.session_state['command'] + '\nPlease approve or deny this request.')
|
199 |
-
col5, col6 = st.columns(2)
|
200 |
with col5:
|
201 |
-
if st.button('
|
202 |
st.session_state['acceptreject'] = False
|
203 |
runCmd(1)
|
204 |
with col6:
|
@@ -206,17 +234,22 @@ if st.session_state['acceptreject']:
|
|
206 |
st.session_state['acceptreject'] = False
|
207 |
runCmd(0)
|
208 |
|
|
|
|
|
|
|
|
|
|
|
209 |
if st.session_state['displayChat']:
|
210 |
for i in st.session_state['history']:
|
211 |
if i['role'] == 'user':
|
212 |
if not showAll:
|
213 |
if 'Response:' not in i['content']:
|
214 |
-
message(i['content'], is_user=True, key=random.randint(1,
|
215 |
else:
|
216 |
-
message(i['content'], is_user=True, key=random.randint(1,
|
217 |
elif i['role'] == 'assistant':
|
218 |
if not showAll:
|
219 |
if 'COMMAND' not in i['content']:
|
220 |
-
message(i['content'], key=random.randint(1,
|
221 |
else:
|
222 |
-
message(i['content'], key=random.randint(1,
|
|
|
5 |
from streamlit_chat import message
|
6 |
|
7 |
# some helper vars and functions
|
|
|
8 |
if 'displayChat' not in st.session_state:
|
9 |
with st.spinner('Setting up plugins...'):
|
10 |
os.system('mkdir files')
|
11 |
thishour = str(datetime.datetime.now().hour)
|
12 |
+
if os.path.isfile(os.join(thispath, 'timestamp.txt')):
|
13 |
+
with open(os.join(thispath, 'timestamp.txt')) as f:
|
14 |
stampHour = f.readline().strip('\n')
|
15 |
if thishour != stampHour:
|
16 |
os.system('rm -rf files/*')
|
|
|
26 |
blipPath = os.path.join(thispath, 'plugins', 'blip2.py')
|
27 |
with open(blipPath, 'wb') as f:
|
28 |
f.write(blipS)
|
29 |
+
st.session_state['running'] = False # Triggers main GPT loop
|
30 |
+
st.session_state['followup'] = False # Follow flag for the main loop
|
31 |
+
st.session_state['prompt'] = ''
|
32 |
+
st.session_state['command'] = '' #command to be run locally
|
33 |
+
st.session_state['acceptreject'] = False #shows accept/reject buttons for when commands are called
|
34 |
+
st.session_state['history'] = [] #OpenAI convrsation history stored here
|
35 |
+
st.session_state['displayChat'] = False
|
36 |
+
st.session_state['displayCost'] = False
|
37 |
+
st.session_state['download'] = False #display download button
|
38 |
+
st.session_state['totalCost'] = 0 # total cost of API calls
|
39 |
+
regx = [r"([A-Z]+\(((?:[^()\"']|(?:\"[^\"]*\")|(?:'[^']*')|\((?1)*\))*)\))",
|
40 |
+
r'''(?:"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*'|\b[^,]+)'''] #parsing commands, parsing arguments (thanks GPT-4)
|
41 |
+
#system message
|
42 |
+
sysPrompt = 'You now have access to some commands to help complete the user\'s request. ' \
|
43 |
+
'You are able to access the user\'s machine with these commands. In every message you send, ' \
|
44 |
+
'include "COMMAND: " with your command at the end. Here is a list of commands with ' \
|
45 |
+
'explanations of how they are used:\n{}\n When you use a command, the user will respond ' \
|
46 |
+
'with "Response: " followed by the output of the commmand. Use this output to help the ' \
|
47 |
+
'user complete their request.'
|
48 |
|
49 |
+
#format command table for GPT-4
|
50 |
def formatTable(table):
    """Render the editable command table as 'COMMAND - explanation' lines.

    table: mapping (dict or DataFrame) with parallel 'GPT Commands' and
    'GPT Explanations' columns. Returns one newline-terminated line per
    command, suitable for interpolation into the GPT system prompt.
    """
    # zip the two columns directly instead of enumerate + repeated positional
    # indexing (the loop variable was unused); ''.join avoids quadratic
    # string concatenation.
    return ''.join('{} - {}\n'.format(cmd, expl)
                   for cmd, expl in zip(table['GPT Commands'], table['GPT Explanations']))
|
55 |
|
56 |
+
#Ask GPT a prompt, update history and total cost, return a response
|
57 |
+
def askGPT(input, version):
    """Send a prompt to OpenAI, record both sides in the chat history,
    accumulate the running API cost, and return the assistant's reply.

    input:   the user-role message text to send.
    version: OpenAI model name (e.g. 'gpt-4', 'gpt-3.5-turbo').
    """
    history = st.session_state['history']  # same list object; appends persist
    history.append({'role': 'user', 'content': input})
    with st.spinner('Talking to OpenAI...'):
        r = openai.ChatCompletion.create(model=version, messages=history)
    resp = r['choices'][0]['message']['content']
    # Per-1K-token pricing: GPT-4 bills prompt and completion at different
    # rates; GPT-3.5-turbo bills both the same.
    if version == 'gpt-4':
        promptRate, completionRate = 0.03, 0.06
    else:
        promptRate, completionRate = 0.002, 0.002
    usage = r['usage']
    st.session_state['totalCost'] += (usage['prompt_tokens'] / 1000 * promptRate
                                      + usage['completion_tokens'] / 1000 * completionRate)
    history.append({'role': 'assistant', 'content': resp})
    return resp
|
66 |
+
|
67 |
+
#restart main loop with followup flag
|
68 |
+
def followup():
    """Restart the main GPT loop with the followup flag raised.

    Sets both state flags, then forces an immediate Streamlit rerun so the
    main loop picks up st.session_state['prompt'] as a followup message.
    """
    st.session_state['followup'] = True
    st.session_state['running'] = True
    st.experimental_rerun()
|
71 |
+
|
72 |
+
#run a GPT command or reject it
|
73 |
+
def runCmd(flag):
    """Execute the pending GPT command (flag truthy) or reject it (falsy).

    The command's combined stdout/stderr — or a rejection notice — is stored
    in st.session_state['prompt'] as a "Response: ..." message, then
    followup() reruns the main loop so GPT sees the result.
    """
    if flag:
        with st.spinner('Running command \'' + st.session_state['command'] + '\''):
            try:
                # SECURITY: shell=True executes model-generated text verbatim.
                # That is this app's whole point, but it is inherently
                # dangerous — the manual-approval checkbox is the only gate.
                # subprocess.run() replaces the original Popen + wait() +
                # communicate() sequence: calling wait() before reading a
                # PIPE can deadlock once the OS pipe buffer fills.
                p = subprocess.run(st.session_state['command'], shell=True,
                                   stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
                st.session_state['prompt'] = 'Response: ' + p.stdout.decode("utf-8")
            except subprocess.CalledProcessError as e:
                # Kept for parity with the original handler; run() without
                # check=True never raises this, so the path is dormant.
                st.session_state['prompt'] = 'Response: ' + e.output.decode("utf-8")
    else:
        st.session_state['prompt'] = "Response: User rejected this command"
    followup()
|
85 |
+
|
86 |
# WEB APP
|
87 |
+
st.markdown('# GPT-4 UNLIMITED PLUGINS')
|
88 |
st.markdown('Made by [d3nt](https://github.com/d3n7) to give GPT-4 access to any commands/scripts you want via the command line. This unlocks the ability for GPT-4 to interact with the internet, APIs, and any applications that you could with a CLI. Basically it\'s open source, flexible, plugins for GPT-4.')
|
89 |
|
90 |
#User inputs
|
|
|
92 |
openaikey = st.text_input('OpenAI API Key', type='password')
|
93 |
replicatekey = st.text_input('(OPTIONAL) Replicate API Key', type='password')
|
94 |
modelV = st.selectbox('Model', ('GPT-4', 'GPT-3.5-Turbo'))
|
95 |
+
|
96 |
st.markdown('### Editable Knowledge Base\nDelete any commands will not need to save tokens and increase accuracy.\n\nBe careful with the Raw Translation column. This is code that gets executed by your machine.')
|
97 |
d = {'GPT Commands': ['GOOGLE("question")', 'PYTHON(script.py)', 'MAKEFILE("content\\nhere", filename.txt)', 'READFILE(filename.txt)', 'LISTFILES()', 'BLIP("What\'s in this image?", img1.jpg)'],
|
98 |
'GPT Explanations': ['Search Google with the given text and return the results', 'Run a python script with the given file name. Do not use quotes for the filename argument.', 'Make a file with the given content and file name.', 'Read the content of a given filename', 'List the files you have access to', 'Ask BLIP-2, a vision model, a given question about a given image'],
|
|
|
100 |
}
|
101 |
df = pd.DataFrame(data=d, dtype='string')
|
102 |
commandTable = st.experimental_data_editor(df, use_container_width=True, num_rows='dynamic')
|
103 |
+
|
104 |
+
st.markdown('### Upload/Download Files')
|
105 |
uploadFile = st.file_uploader('')
|
106 |
cola, colb, colc = st.columns(3)
|
107 |
with cola:
|
|
|
113 |
f.write(uploadFile.getbuffer())
|
114 |
st.write('Success')
|
115 |
with colb:
|
116 |
+
if st.button('Download'):
|
117 |
shutil.make_archive(os.path.join(thispath, 'files'), 'zip', os.path.join(thispath, 'files'))
|
118 |
+
st.session_state['download'] = True
|
119 |
with colc:
|
120 |
+
if st.session_state['download']:
|
121 |
with open(os.path.join(thispath, 'files.zip'), 'rb') as f:
|
122 |
st.download_button('Download Zip', f, file_name='file.zip', key='filezip')
|
123 |
+
|
124 |
st.markdown('### Chat')
|
125 |
prompt = st.text_input('Message')
|
126 |
col1, col2, col3, col4 = st.columns(4)
|
127 |
+
#this button triggers main loop
|
128 |
with col1:
|
129 |
if st.button('Send'):
|
130 |
st.session_state['running'] = True
|
|
|
135 |
with col4:
|
136 |
manualApproval = st.checkbox('Require Manual Approval', True)
|
137 |
|
138 |
+
#MAIN GPT LOOP
|
139 |
+
if st.session_state['running']:
|
140 |
+
st.session_state['running'] = False #reset running flag
|
|
|
|
|
|
|
|
|
141 |
|
142 |
+
#get user prompt
|
143 |
+
if not st.session_state['followup']:
|
144 |
+
st.session_state['prompt'] = prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
145 |
|
|
|
|
|
146 |
os.environ['REPLICATE_API_TOKEN'] = replicatekey
|
147 |
+
if openaikey != '':
|
148 |
+
#set system prompt or update system prompt
|
149 |
if (newSession or st.session_state['history'] == []) and (not st.session_state['followup']):
|
150 |
st.session_state['history'] = [{'role': 'system', 'content': sysPrompt.format(formatTable(commandTable))}]
|
151 |
+
else:
|
152 |
+
st.session_state['history'][0] = {'role': 'system', 'content': sysPrompt.format(formatTable(commandTable))}
|
153 |
+
st.session_state['followup'] = False #reset followup flag
|
154 |
|
155 |
+
#turn on display for chat and cost if it's not already on
|
156 |
if not st.session_state['displayChat']:
|
157 |
st.session_state['displayChat'] = True
|
158 |
+
if not st.session_state['displayCost']:
|
159 |
+
st.session_state['displayCost'] = True
|
160 |
|
161 |
+
#ask GPT-4
|
162 |
openai.api_key = openaikey
|
163 |
+
response = askGPT(st.session_state['prompt'], modelV.lower())
|
|
|
|
|
|
|
|
|
164 |
|
165 |
+
#parse GPT commands, possibly trigger this loop again
|
166 |
if len(regex.findall(regx[0], response)) >= 1:
|
167 |
cmd = regex.findall(regx[0], response)[0][0]
|
168 |
stem = ''
|
169 |
rawArgs = ''
|
170 |
cmdId = -1
|
171 |
+
|
172 |
+
#seperate command into stem and arguments
|
173 |
for x, i in enumerate(cmd):
|
174 |
if i == '(':
|
175 |
stem = cmd[:x]
|
176 |
rawArgs = cmd[x+1:][:-1]
|
177 |
break
|
178 |
+
|
179 |
+
#identify command
|
180 |
for x, i in enumerate(commandTable['GPT Commands']):
|
181 |
if stem in i:
|
182 |
cmdId = x
|
183 |
break
|
184 |
|
185 |
+
#Handle incorrect command usage, or run the command
|
186 |
+
rawArgs.replace('\n', '\\n')
|
187 |
+
rawArgs.replace('\\\n', '\\n')
|
188 |
if cmdId == -1:
|
189 |
+
st.session_state['prompt'] = 'Response: Unrecognized command'
|
190 |
+
followup()
|
|
|
191 |
elif "'''" in rawArgs:
|
192 |
+
st.session_state['prompt'] = 'Response: Error parsing multi-line string (\'\'\') Use a single line with escaped newlines instead (")'
|
193 |
+
followup()
|
|
|
194 |
elif '"""' in rawArgs:
|
195 |
+
st.session_state['prompt'] = 'Response: Error parsing multi-line string (\"\"\") Use a single line with escaped newlines instead (")'
|
196 |
+
followup()
|
|
|
197 |
else:
|
198 |
+
# Fetch command, turn raw argument string into a list of arguments, and format command
|
199 |
st.session_state['command'] = commandTable['Raw Translation'][cmdId]
|
200 |
args = []
|
201 |
if rawArgs != '':
|
202 |
args = re.findall(regx[1], rawArgs)
|
203 |
st.session_state['command'] = st.session_state['command'].format(*args)
|
204 |
+
|
205 |
+
# No single quote arguments allowed. Messes up MAKEFILE() and probably other commands.
|
206 |
singleQuotes = False
|
207 |
for i in args:
|
208 |
if i.startswith("'"):
|
209 |
singleQuotes = True
|
210 |
+
st.session_state['prompt'] = "Response: Error parsing argument in single quotes. Use double quotes around the argument instead"
|
211 |
+
followup()
|
|
|
212 |
break
|
213 |
+
|
214 |
+
#If none of the above was a problem, run the command
|
215 |
if not singleQuotes:
|
216 |
if manualApproval:
|
217 |
st.session_state['acceptreject'] = True
|
|
|
219 |
runCmd(1)
|
220 |
|
221 |
else:
|
222 |
+
st.warning('Make sure OpenAI key is entered', icon='⚠️')
|
223 |
|
224 |
+
#UI for accepting/rejecting commands
|
225 |
+
col5, col6 = st.columns(2)
|
226 |
if st.session_state['acceptreject']:
|
227 |
+
st.warning('GPT is trying to run the following command: ' + st.session_state['command'] + '\nPlease accept or reject this request.')
|
|
|
|
|
228 |
with col5:
|
229 |
+
if st.button('Accept'):
|
230 |
st.session_state['acceptreject'] = False
|
231 |
runCmd(1)
|
232 |
with col6:
|
|
|
234 |
st.session_state['acceptreject'] = False
|
235 |
runCmd(0)
|
236 |
|
237 |
+
#display cost for the user
|
238 |
+
if st.session_state['displayCost']:
|
239 |
+
st.info('Total OpenAI cost: $'+str(round(st.session_state['totalCost'],2)), icon='💸')
|
240 |
+
|
241 |
+
#display chat for the user
|
242 |
if st.session_state['displayChat']:
|
243 |
for i in st.session_state['history']:
|
244 |
if i['role'] == 'user':
|
245 |
if not showAll:
|
246 |
if 'Response:' not in i['content']:
|
247 |
+
message(i['content'], is_user=True, key=random.randint(1,9999))
|
248 |
else:
|
249 |
+
message(i['content'], is_user=True, key=random.randint(1,9999))
|
250 |
elif i['role'] == 'assistant':
|
251 |
if not showAll:
|
252 |
if 'COMMAND' not in i['content']:
|
253 |
+
message(i['content'], key=random.randint(1,9999))
|
254 |
else:
|
255 |
+
message(i['content'], key=random.randint(1,9999))
|