gregH committed on
Commit
9c7e29f
1 Parent(s): d367953

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -93,7 +93,7 @@ def embedding_shift(original_embedding,shift_embeddings,prefix_embedding,suffix_
93
  return input_embeddings
94
  def engine(input_embeds):
95
  output_text = []
96
- batch_size = 20
97
  with torch.no_grad():
98
  for start in range(0,len(input_embeds),batch_size):
99
  batch_input_embeds = input_embeds[start:start+batch_size]
@@ -160,7 +160,7 @@ def gradient_cuff_reject(message,with_defense, sample_times,perturb_times,thresh
160
  return False
161
 
162
  def chat(message, history, with_defense,perturb_times):
163
- sample_times=20
164
  threshold=thresholds[perturb_times-1]
165
  if gradient_cuff_reject(message,with_defense, sample_times, perturb_times, threshold):
166
  answer="[Gradient Cuff Rejection] I cannot fulfill your request".split(" ")
@@ -184,7 +184,7 @@ def chat(message, history, with_defense,perturb_times):
184
  generate_kwargs = dict(
185
  model_inputs,
186
  streamer=streamer,
187
- max_new_tokens=256,
188
  do_sample=True,
189
  top_p=0.90,
190
  temperature=0.6,
 
93
  return input_embeddings
94
  def engine(input_embeds):
95
  output_text = []
96
+ batch_size = 10
97
  with torch.no_grad():
98
  for start in range(0,len(input_embeds),batch_size):
99
  batch_input_embeds = input_embeds[start:start+batch_size]
 
160
  return False
161
 
162
  def chat(message, history, with_defense,perturb_times):
163
+ sample_times=10
164
  threshold=thresholds[perturb_times-1]
165
  if gradient_cuff_reject(message,with_defense, sample_times, perturb_times, threshold):
166
  answer="[Gradient Cuff Rejection] I cannot fulfill your request".split(" ")
 
184
  generate_kwargs = dict(
185
  model_inputs,
186
  streamer=streamer,
187
+ max_new_tokens=10,
188
  do_sample=True,
189
  top_p=0.90,
190
  temperature=0.6,