TypeError: 'NoneType' object is not callable

#5
by catworld1212 - opened

I am getting a "TypeError: 'NoneType' object is not callable" error when I try to use the model.

    import base64
    from io import BytesIO
    import torch
    from PIL import Image
    import requests
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "internlm/internlm-xcomposer2d5-7b", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        "internlm/internlm-xcomposer2d5-7b",
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True
    ).to("cuda").eval()

    # url and query are defined elsewhere in my code
    response = requests.get(url)
    with open("test.png", "wb") as f:
        f.write(response.content)

    with torch.autocast(device_type='cuda', dtype=torch.float16):
        response = model.write_webpage(
            query, ["test.png"], seed=202, task="what do you see?", repetition_penalty=3.0)
    print(response)
    return response  # this snippet is the body of a function

Same issue when I use model.resume_2_webpage
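Presumably the remote modeling code keeps a tokenizer attribute on the model and calls it internally; right after from_pretrained that attribute is still unset (this is my assumption, checked with the quick probe below).

    # Quick probe, assuming the remote code stores the tokenizer on the model object:
    print(getattr(model, "tokenizer", "<no tokenizer attribute>"))
    # prints None here, so any helper that internally does self.tokenizer(...)
    # would raise "TypeError: 'NoneType' object is not callable"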

I found that setting the tokenizer on the model object works:
tokenizer = AutoTokenizer.from_pretrained(
    "internlm/internlm-xcomposer2d5-7b", trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm-xcomposer2d5-7b",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True
).to("cuda")

model.tokenizer = tokenizer
model.eval()

InternLM org

@catworld1212 Hi, please set model.tokenizer = tokenizer as follows:

# init model and tokenizer

model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval()
tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
model.tokenizer = tokenizer
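
For reference, a minimal end-to-end sketch with the tokenizer attached; the query string is a placeholder, and the write_webpage call simply mirrors the one in the first post:

    import torch
    from transformers import AutoModel, AutoTokenizer

    model = AutoModel.from_pretrained('internlm/internlm-xcomposer2d5-7b',
                                      torch_dtype=torch.bfloat16,
                                      trust_remote_code=True).cuda().eval()
    tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2d5-7b',
                                              trust_remote_code=True)
    model.tokenizer = tokenizer  # attach the tokenizer before calling the remote-code helpers

    query = "what do you see?"  # placeholder prompt
    with torch.autocast(device_type='cuda', dtype=torch.float16):
        response = model.write_webpage(
            query, ["test.png"], seed=202, repetition_penalty=3.0)
    print(response)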

@myownskyW7 I did that, but now I get this error:

Traceback (most recent call last):
  File "/pkg/modal/_container_io_manager.py", line 488, in handle_input_exception
    yield
  File "/pkg/modal/_container_entrypoint.py", line 226, in run_input_sync
    res = imp_fun.fun(*args, **kwargs)
  File "/root/main.py", line 66, in generate
    model = AutoModel.from_pretrained(
  File "/usr/local/lib/python3.9/site-packages/transformers/models/auto/auto_factory.py", line 559, in from_pretrained
    return model_class.from_pretrained(
  File "/usr/local/lib/python3.9/site-packages/transformers/modeling_utils.py", line 3710, in from_pretrained
    model = cls(config, *model_args, **model_kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-xcomposer2d5-7b/dc3215db0063f06d0a48b20eda8a8031e8d41e05/modeling_internlm_xcomposer2.py", line 105, in __init__
    self.font = get_font()
  File "/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-xcomposer2d5-7b/dc3215db0063f06d0a48b20eda8a8031e8d41e05/ixc_utils.py", line 13, in get_font
    ff = urlopen(truetype_url)
  File "/usr/local/lib/python3.9/urllib/request.py", line 214, in urlopen
    return opener.open(url, data, timeout)
  File "/usr/local/lib/python3.9/urllib/request.py", line 523, in open
    response = meth(req, response)
  File "/usr/local/lib/python3.9/urllib/request.py", line 632, in http_response
    response = self.parent.error(
  File "/usr/local/lib/python3.9/urllib/request.py", line 561, in error
    return self._call_chain(*args)
  File "/usr/local/lib/python3.9/urllib/request.py", line 494, in _call_chain
    result = func(*args)
  File "/usr/local/lib/python3.9/urllib/request.py", line 641, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden

Download the font file "SimHei.ttf" and, in ixc_utils.py, change this part to point to your local copy of the font:

def get_font():
    #truetype_url = 'the previous url'
    #ff = urlopen(truetype_url)
    font = ImageFont.truetype(r"C:\Path\to\SimHei.ttf", size=40)
    return font

I think there is a better solution, but this is a quick fix to get you running.
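
A slightly more portable variant of the same idea (just a sketch on my part): download SimHei.ttf once into the working directory and have get_font in ixc_utils.py load the local copy. The download URL below is a placeholder you have to replace with a mirror you trust.

    import os
    from urllib.request import Request, urlopen

    from PIL import ImageFont

    FONT_URL = "https://example.com/SimHei.ttf"  # placeholder: point this at a trusted mirror
    FONT_PATH = "SimHei.ttf"                     # saved in the current working directory

    def get_font():
        # Download the font once; send a browser-like User-Agent, since some hosts
        # answer Python's default urllib client with HTTP 403.
        if not os.path.exists(FONT_PATH):
            req = Request(FONT_URL, headers={"User-Agent": "Mozilla/5.0"})
            with urlopen(req) as resp, open(FONT_PATH, "wb") as f:
                f.write(resp.read())
        return ImageFont.truetype(FONT_PATH, size=40)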

InternLM org

@catworld1212 @Rodeszones Hi, I have updated the font file URL in ixc_utils.py, so it now works automatically.

@myownskyW7 I am still getting an error: "The number of elements in 'fill' does not match the number of channels of the image (3 != 4)".
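
Just a guess on my side: that message usually comes from the image padding step when a 3-value fill colour meets a 4-channel (RGBA) image, so the downloaded test.png may still have an alpha channel. Converting it to RGB before the call might get past this; untested sketch:

    from PIL import Image

    # Drop the alpha channel so a 3-value RGB fill matches the image's channels.
    Image.open("test.png").convert("RGB").save("test.png")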
