tensorflow 如何修复AttributeError:部分初始化的模块“charset_normalizer”没有属性“md__mypyc”(很可能是由于循环导入)

dkqlctbz  于 2023-03-19  发布在  其他
关注(0)|答案(1)|浏览(2383)

当我执行 "import tensorflow as tf" 导入 TensorFlow GPU 版时,出现以下 AttributeError:部分初始化的模块 "charset_normalizer" 没有属性 "md__mypyc"(很可能是由于循环导入)。完整的错误堆栈如下:

AttributeError                            Traceback (most recent call last)
Cell In[22], line 1
----> 1 import tensorflow as tf

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\__init__.py:51
     49 from ._api.v2 import autograph
     50 from ._api.v2 import bitwise
---> 51 from ._api.v2 import compat
     52 from ._api.v2 import config
     53 from ._api.v2 import data

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\__init__.py:37
      3 """Compatibility functions.
      4 
      5 The `tf.compat` module contains two sets of compatibility functions.
   (...)
     32 
     33 """
     35 import sys as _sys
---> 37 from . import v1
     38 from . import v2
     39 from tensorflow.python.compat.compat import forward_compatibility_horizon

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\__init__.py:30
     28 from . import autograph
     29 from . import bitwise
---> 30 from . import compat
     31 from . import config
     32 from . import data

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\compat\__init__.py:38
     35 import sys as _sys
     37 from . import v1
---> 38 from . import v2
     39 from tensorflow.python.compat.compat import forward_compatibility_horizon
     40 from tensorflow.python.compat.compat import forward_compatible

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\compat\v2\__init__.py:28
     25 # pylint: disable=g-bad-import-order
     27 from . import compat
---> 28 from tensorflow._api.v2.compat.v2 import __internal__
     29 from tensorflow._api.v2.compat.v2 import __operators__
     30 from tensorflow._api.v2.compat.v2 import audio

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\__init__.py:33
     31 from . import autograph
     32 from . import bitwise
---> 33 from . import compat
     34 from . import config
     35 from . import data

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\compat\__init__.py:38
     35 import sys as _sys
     37 from . import v1
---> 38 from . import v2
     39 from tensorflow.python.compat.compat import forward_compatibility_horizon
     40 from tensorflow.python.compat.compat import forward_compatible

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\compat\v2\__init__.py:37
     35 from tensorflow._api.v2.compat.v2 import data
     36 from tensorflow._api.v2.compat.v2 import debugging
---> 37 from tensorflow._api.v2.compat.v2 import distribute
     38 from tensorflow._api.v2.compat.v2 import dtypes
     39 from tensorflow._api.v2.compat.v2 import errors

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\__init__.py:182
    180 from . import cluster_resolver
    181 from . import coordinator
--> 182 from . import experimental
    183 from tensorflow.python.distribute.collective_all_reduce_strategy import CollectiveAllReduceStrategy as MultiWorkerMirroredStrategy
    184 from tensorflow.python.distribute.cross_device_ops import CrossDeviceOps

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\experimental\__init__.py:10
      8 from . import coordinator
      9 from . import partitioners
---> 10 from . import rpc
     11 from tensorflow.python.distribute.central_storage_strategy import CentralStorageStrategy
     12 from tensorflow.python.distribute.collective_all_reduce_strategy import _CollectiveAllReduceStrategyExperimental as MultiWorkerMirroredStrategy

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\experimental\rpc\__init__.py:8
      3 """Public API for tf.distribute.experimental.rpc namespace.
      4 """
      6 import sys as _sys
----> 8 from tensorflow.python.distribute.experimental.rpc.rpc_ops import Client
      9 from tensorflow.python.distribute.experimental.rpc.rpc_ops import Server

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\experimental\__init__.py:22
     20 from tensorflow.python.distribute import parameter_server_strategy
     21 from tensorflow.python.distribute import tpu_strategy
---> 22 from tensorflow.python.distribute.failure_handling import failure_handling

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\failure_handling\failure_handling.py:33
     31 from tensorflow.python.checkpoint import checkpoint_management
     32 from tensorflow.python.distribute import multi_worker_util
---> 33 from tensorflow.python.distribute.failure_handling import gce_util
     34 from tensorflow.python.eager import context
     35 from tensorflow.python.framework import constant_op

File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\failure_handling\gce_util.py:20
     17 import os
     18 import sys
---> 20 import requests
     22 from six.moves.urllib import request
     23 from tensorflow.python.eager import context

File ~\anaconda3\envs\tf_gpu\lib\site-packages\requests\__init__.py:48
     45 from .exceptions import RequestsDependencyWarning
     47 try:
---> 48     from charset_normalizer import __version__ as charset_normalizer_version
     49 except ImportError:
     50     charset_normalizer_version = None

File ~\anaconda3\envs\tf_gpu\lib\site-packages\charset_normalizer\__init__.py:23
      1 """
      2 Charset-Normalizer
      3 ~~~~~~~~~~~~~~
   (...)
     21 :license: MIT, see LICENSE for more details.
     22 """
---> 23 from charset_normalizer.api import from_fp, from_path, from_bytes, normalize
     24 from charset_normalizer.legacy import detect
     25 from charset_normalizer.version import __version__, VERSION

File ~\anaconda3\envs\tf_gpu\lib\site-packages\charset_normalizer\api.py:10
      7     PathLike = Union[str, 'os.PathLike[str]']  # type: ignore
      9 from charset_normalizer.constant import TOO_SMALL_SEQUENCE, TOO_BIG_SEQUENCE, IANA_SUPPORTED
---> 10 from charset_normalizer.md import mess_ratio
     11 from charset_normalizer.models import CharsetMatches, CharsetMatch
     12 from warnings import warn

AttributeError: partially initialized module 'charset_normalizer' has no attribute 'md__mypyc' (most likely due to a circular import)

我重新安装了 "requests"、"chardet"、"openpyxl",但问题没有任何变化。

ao218c7q

ao218c7q1#

您的堆栈跟踪没有显示所安装的 charset-normalizer 包是哪个版本。
我在使用 Ray 训练 xgboost 模型时遇到过类似的错误。当时安装的是 charset-normalizer v3.0.1,升级到 v3.1.0 后该错误得到修复。
请尝试运行:

pip install --force-reinstall charset-normalizer==3.1.0

或者干脆

pip install -U --force-reinstall charset-normalizer

然后重新运行您的代码,看看这是否有效!

相关问题