rogerxfeng8
committed on
Commit e203eff
Parent(s): e4f4e18
Update modeling_phi3_small.py
modeling_phi3_small.py +2 -1

modeling_phi3_small.py CHANGED
@@ -216,7 +216,8 @@ class Phi3SmallSelfAttention(nn.Module):
                 f"{self.config.dense_attention_every_n_layers}"
             )
             # use warnings to allow the modeling use different flash attention implementation later
-
+            if not is_flash_attention_available:
+                logger.warning_once("Flash Attention is not available, but is needed for dense attention")
         else:
             # BlockSparse related Parameters
             self.blocksparse_params = BlockSparseParams.from_config(config)
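For context, the added guard follows the warn-instead-of-fail pattern the comment above it describes: check once whether the optional flash_attn package is usable and emit a one-time warning if it is not, rather than raising, so a different flash attention implementation can be substituted later. The sketch below illustrates that pattern; deriving is_flash_attention_available from an importlib probe is an assumption made here for illustration, not a copy of the model's own check.

# Minimal sketch of the warn-once guard added in this commit (assumed context, not the model's exact code).
import importlib.util

from transformers.utils import logging

logger = logging.get_logger(__name__)

# Assumption: the availability flag is derived by probing for the optional flash_attn package.
is_flash_attention_available = importlib.util.find_spec("flash_attn") is not None

if not is_flash_attention_available:
    # warning_once() logs the message a single time per process and does not raise,
    # leaving room to fall back to another attention implementation.
    logger.warning_once("Flash Attention is not available, but is needed for dense attention")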