Here we share example source code for the Python numpy module's ufunc(), to give you a better understanding of how numpy works in Python. Along the way we also look at several related problems: AttributeError: 'float' object has no attribute 'log' / TypeError: ufunc 'log' not supported for the input types; the matplotlib error "loop of ufunc does not support argument 0 of type float which has no callable rint method"; NetCDF Python "Cannot cast ufunc 'multiply' output from dtype('<U32') to dtype('float32')"; and whether numpy ufunc.reduce is slower than applying native Python reduce after ndarray.tolist().
- Python numpy module: ufunc() example source code
- AttributeError: 'float' object has no attribute 'log' / TypeError: ufunc 'log' not supported for the input types
- matplotlib error: loop of ufunc does not support argument 0 of type float which has no callable rint method
- NetCDF Python: Cannot cast ufunc 'multiply' output from dtype('<U32') to dtype('float32')
- Is numpy ufunc.reduce slower than applying native Python reduce after ndarray.tolist()?
Python numpy module: ufunc() example source code

We have collected the following 19 code examples from open-source Python projects to illustrate how numpy.ufunc() is used.
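Before the project snippets, here is a minimal orientation sketch (our own illustration, not taken from any of the projects below) showing what a numpy.ufunc is and the metadata the examples rely on, such as nin, nout and types:

import numpy as np

# np.add is a built-in binary ufunc: two inputs, one output
print(isinstance(np.add, np.ufunc))  # True
print(np.add.nin, np.add.nout)       # 2 1
print(np.add.types[:3])              # type signatures, e.g. ['??->?', 'bb->b', 'BB->B']

# numpy.frompyfunc wraps a Python callable into a (slower, object-dtype) ufunc
py_add = np.frompyfunc(lambda a, b: a + b, 2, 1)
print(isinstance(py_add, np.ufunc))  # True
print(py_add([1, 2], [3, 4]))        # [4 6], with dtype=object

Several of the snippets below branch on isinstance(f, numpy.ufunc) or fall back to numpy.frompyfunc exactly as in this sketch.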
def __deepcopy__(self, memo):
    obj = type(self).__new__(type(self))
    if isinstance(self.base_value, VectorizedIterable):  # special case, but perhaps need to rethink
        obj.base_value = self.base_value                 # whether deepcopy is appropriate everywhere
    else:
        try:
            obj.base_value = deepcopy(self.base_value)
        except TypeError:  # base_value cannot be copied, e.g. is a generator (but see generator_tools from PyPI)
            obj.base_value = self.base_value  # so here we create a reference rather than deepcopying - could cause problems
    obj._shape = self._shape
    obj.dtype = self.dtype
    obj.operations = []
    for f, arg in self.operations:
        if isinstance(f, numpy.ufunc):
            obj.operations.append((f, deepcopy(arg)))
        else:
            obj.operations.append((deepcopy(f), deepcopy(arg)))
    return obj
def __array_prepare__(self, result, context=None):
    """
    Gets called prior to a ufunc.
    """
    # nice error message for non-ufunc types
    if context is not None and not isinstance(self._values, np.ndarray):
        obj = context[1][0]
        raise TypeError("{obj} with dtype {dtype} cannot perform "
                        "the numpy op {op}".format(
                            obj=type(obj).__name__,
                            dtype=getattr(obj, 'dtype', None),
                            op=context[0].__name__))
    return result
# complex
def __array_wrap__(self, result, context=None):
    """
    Gets called after a ufunc. Needs additional handling as
    PeriodIndex stores internal data as int dtype.
    Replace this with __numpy_ufunc__ in a future version.
    """
    if isinstance(context, tuple) and len(context) > 0:
        func = context[0]
        if func is np.add:
            return self._add_delta(context[1][1])
        elif func is np.subtract:
            return self._add_delta(-context[1][1])
        elif isinstance(func, np.ufunc):
            if 'M->M' not in func.types:
                msg = "ufunc '{0}' not supported for the PeriodIndex"
                # This should be TypeError, but TypeError cannot be raised
                # from here because numpy catches it.
                raise ValueError(msg.format(func.__name__))

    if com.is_bool_dtype(result):
        return result
    return PeriodIndex(result, freq=self.freq, name=self.name)
def __init__(self, scalar_op, inplace_pattern=None, name=None,
             nfunc_spec=None, openmp=None):
    if inplace_pattern is None:
        inplace_pattern = {}
    self.name = name
    self.scalar_op = scalar_op
    self.inplace_pattern = inplace_pattern
    self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items())

    self.ufunc = None
    self.nfunc = None
    if nfunc_spec is None:
        nfunc_spec = getattr(scalar_op, 'nfunc_spec', None)
    self.nfunc_spec = nfunc_spec
    if nfunc_spec:
        self.nfunc = getattr(numpy, nfunc_spec[0])

    # precompute the hash of this node
    self._rehash()
    super(Elemwise, self).__init__(openmp=openmp)
def set_ufunc(self, scalar_op):
    # This is probably a speed up of the implementation
    if isinstance(scalar_op, theano.scalar.basic.Add):
        self.ufunc = numpy.add
    elif isinstance(scalar_op, theano.scalar.basic.Mul):
        self.ufunc = numpy.multiply
    elif isinstance(scalar_op, theano.scalar.basic.Maximum):
        self.ufunc = numpy.maximum
    elif isinstance(scalar_op, theano.scalar.basic.Minimum):
        self.ufunc = numpy.minimum
    elif isinstance(scalar_op, theano.scalar.basic.AND):
        self.ufunc = numpy.bitwise_and
    elif isinstance(scalar_op, theano.scalar.basic.OR):
        self.ufunc = numpy.bitwise_or
    elif isinstance(scalar_op, theano.scalar.basic.XOR):
        self.ufunc = numpy.bitwise_xor
    else:
        self.ufunc = numpy.frompyfunc(scalar_op.impl, 2, 1)
def _check_binary_ufunc(ufunc):
    """ Check that ufunc is suitable for ``ireduce_ufunc`` """
    if not isinstance(ufunc, np.ufunc):
        raise TypeError('{} is not a NumPy Ufunc'.format(ufunc.__name__))
    if not ufunc.nin == 2:
        raise ValueError('Only binary ufuncs are supported, and {} is '
                         'not one of them'.format(ufunc.__name__))
    # Ufuncs that always return bool are problematic because they can be reduced
    # but not accumulated.
    # Recall: numpy.dtype('?') == np.bool
    if all(type_signature[-1] == '?' for type_signature in ufunc.types):
        raise ValueError('Only binary ufuncs that preserve type are supported, '
                         'and {} is not one of them'.format(ufunc.__name__))
def _ireduce_ufunc_new_axis(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, in the direction of a new axis (i.e. stacking).

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``.

    Yields
    ------
    reduced : ndarray
    """
    arrays = iter(arrays)
    first = next(arrays)

    kwargs.pop('axis')

    dtype = kwargs.get('dtype', None)
    if dtype is None:
        dtype = first.dtype
    else:
        kwargs['casting'] = 'unsafe'

    # If the out parameter was already given,
    # we create the accumulator from it.
    # Otherwise, it is a copy of the first array.
    accumulator = kwargs.pop('out', None)
    if accumulator is not None:
        accumulator[:] = first
    else:
        accumulator = np.array(first, copy=True).astype(dtype)
    yield accumulator

    for array in arrays:
        ufunc(accumulator, array, out=accumulator, **kwargs)
        yield accumulator
def _ireduce_ufunc_all_axes(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, over all axes.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``. The ``out`` parameter is ignored.

    Yields
    ------
    reduced : scalar
    """
    arrays = iter(arrays)
    first = next(arrays)

    kwargs['axis'] = None
    kwargs.pop('out', None)  # Remove the out-parameter if provided.
    axis_reduce = partial(ufunc.reduce, **kwargs)

    accumulator = axis_reduce(first)
    yield accumulator

    for array in arrays:
        accumulator = axis_reduce([accumulator, axis_reduce(array)])
        yield accumulator
def _validate_method(method, dy, fit_bias, nterms,
                     frequency, assume_regular_frequency):
    fast_method_ok = hasattr(np.ufunc, 'at')
    if not fast_method_ok:
        warnings.warn("Fast Lomb-Scargle methods require numpy version 1.8 "
                      "or newer. Using slower methods instead.")

    # automatically choose the appropriate method
    if method == 'auto':
        if nterms != 1:
            if (fast_method_ok and len(frequency) > 100
                    and _is_regular(frequency, assume_regular_frequency)):
                method = 'fastchi2'
            else:
                method = 'chi2'
        elif (fast_method_ok and len(frequency) > 100
              and _is_regular(frequency, assume_regular_frequency)):
            method = 'fast'
        elif dy is None and not fit_bias:
            method = 'scipy'
        else:
            method = 'slow'

    if method not in METHODS:
        raise ValueError("invalid method: {0}".format(method))
    return method
def _build_ufunc(func):
    """Return a ufunc that works with lazy arrays"""
    def larray_compatible_ufunc(x):
        if isinstance(x, larray):
            y = deepcopy(x)
            y.apply(func)
            return y
        else:
            return func(x)
    return larray_compatible_ufunc
def __array_wrap__(self, result, context=None):
    """
    Gets called after a ufunc.
    """
    return self._constructor(result, index=self.index,
                             copy=False).__finalize__(self)
def apply(self, func, axis=0, broadcast=False, reduce=False):
    """
    Analogous to DataFrame.apply, for SparseDataFrame.

    Parameters
    ----------
    func : function
        Function to apply to each column
    axis : {0, 1, 'index', 'columns'}
    broadcast : bool, default False
        For aggregation functions, return object of same size with values
        propagated

    Returns
    -------
    applied : Series or SparseDataFrame
    """
    if not len(self.columns):
        return self
    axis = self._get_axis_number(axis)

    if isinstance(func, np.ufunc):
        new_series = {}
        for k, v in compat.iteritems(self):
            applied = func(v)
            applied.fill_value = func(applied.fill_value)
            new_series[k] = applied
        return self._constructor(
            new_series, columns=self.columns,
            default_fill_value=self._default_fill_value,
            kind=self._default_kind).__finalize__(self)
    else:
        if not broadcast:
            return self._apply_standard(func, axis, reduce=reduce)
        else:
            return self._apply_broadcast(func, axis)
def __getstate__(self):
    d = copy(self.__dict__)
    d.pop('ufunc')
    d.pop('nfunc')
    d.pop('__epydoc_asRoutine', None)
    d.pop('_hashval')
    return d
def __setstate__(self, d):
    super(Elemwise, self).__setstate__(d)
    self.ufunc = None
    self.nfunc = None
    if getattr(self, 'nfunc_spec', None):
        self.nfunc = getattr(numpy, self.nfunc_spec[0])
    elif 0 < self.scalar_op.nin < 32:
        self.ufunc = numpy.frompyfunc(self.scalar_op.impl,
                                      self.scalar_op.nin,
                                      self.scalar_op.nout)
    self._rehash()
def reduce_ufunc(*args, **kwargs):
    """
    Streaming reduction generator function from a binary NumPy ufunc. Essentially the
    function equivalent to ``ireduce_ufunc``.

    ``ufunc`` must be a NumPy binary ufunc (i.e. it takes two arguments). Moreover,
    for performance reasons, ufunc must have the same return types as input types.
    This precludes the use of ``numpy.greater``, for example.

    Note that performance is much better for the default ``axis = -1``. In such a case,
    reduction operations can occur in-place. This also allows operating in constant memory.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function.
    axis : int or None, optional
        Reduction axis. Default is to reduce the arrays in the stream as if
        they had been stacked along a new axis, then reduce along this new axis.
        If None, arrays are flattened before reduction. If `axis` is an int larger than
        the number of dimensions in the arrays of the stream, arrays are reduced
        along the new axis. Note that not all NumPy ufuncs support
        ``axis = None``, e.g. ``numpy.subtract``.
    dtype : numpy.dtype or None, optional
        Overrides the dtype of the calculation and output arrays.
    ignore_nan : bool, optional
        If True and ufunc has an identity value (e.g. ``numpy.add.identity`` is 0), then NaNs
        are replaced with this identity. An error is raised if ``ufunc`` has no identity
        (e.g. ``numpy.maximum.identity`` is ``None``).
    kwargs
        Keyword arguments are passed to ``ufunc``. Note that some valid ufunc keyword arguments
        (e.g. ``keepdims``) are not valid for all streaming functions. Note that,
        contrary to NumPy v1.10+, ``casting = 'unsafe'`` is the default in npstreams.

    Yields
    ------
    reduced : ndarray or scalar

    Raises
    ------
    TypeError : if ``ufunc`` is not a NumPy ufunc.
    ValueError : if ``ignore_nan`` is True but ``ufunc`` has no identity.
    ValueError : if ``ufunc`` is not a binary ufunc.
    ValueError : if ``ufunc`` does not have the same input type as output type.
    """
    return last(ireduce_ufunc(*args, **kwargs))
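A hypothetical usage sketch based purely on the docstring above (assuming reduce_ufunc is importable from the npstreams package): summing a stream of arrays without materializing the whole stack.

import numpy as np
from npstreams import reduce_ufunc  # import path assumed

stream = (np.full((2, 2), i) for i in range(5))  # arrays arrive one at a time
total = reduce_ufunc(stream, np.add, axis=-1)    # as if stacked on a new axis, then summed
print(total)  # [[10 10] [10 10]]: 0 + 1 + 2 + 3 + 4 element-wise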
def _ireduce_ufunc_existing_axis(arrays, ufunc, **kwargs):
    """
    Reduction operation for arrays, in the direction of an existing axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function. Must have a signature of the form ufunc(x1, x2, ...)
    kwargs
        Keyword arguments are passed to ``ufunc``. The ``out`` parameter is ignored.

    Yields
    ------
    reduced : ndarray
    """
    arrays = iter(arrays)
    first = next(arrays)

    if kwargs['axis'] not in range(first.ndim):
        raise ValueError('Axis {} not supported on arrays of shape {}.'.format(kwargs['axis'], first.shape))

    # Remove the out-parameter if provided.
    kwargs.pop('out', None)

    dtype = kwargs.get('dtype')
    if dtype is None:
        dtype = first.dtype

    axis_reduce = partial(ufunc.reduce, **kwargs)

    accumulator = np.atleast_1d(axis_reduce(first))
    yield accumulator

    # On the first pass of the following loop, accumulator is missing a dimension;
    # therefore, the stacking function cannot be 'concatenate'
    second = next(arrays)
    accumulator = np.stack([accumulator, np.atleast_1d(axis_reduce(second))], axis=-1)
    yield accumulator

    # On the second pass, the new dimension exists, and thus we switch to
    # using concatenate.
    for array in arrays:
        reduced = np.expand_dims(np.atleast_1d(axis_reduce(array)), axis=accumulator.ndim - 1)
        accumulator = np.concatenate([accumulator, reduced], axis=accumulator.ndim - 1)
        yield accumulator
def compute_group(cls, data, scales, **params):
    fun = params['fun']
    n = params['n']
    args = params['args']
    xlim = params['xlim']

    try:
        range_x = xlim or scales.x.dimension((0, 0))
    except AttributeError:
        raise PlotnineError(
            "Missing 'x' aesthetic and 'xlim' is {}".format(xlim))

    if not hasattr(fun, '__call__'):
        raise PlotnineError(
            "stat_function requires parameter 'fun' to be " +
            "a function or any other callable object")

    old_fun = fun
    if isinstance(args, (list, tuple)):
        def fun(x):
            return old_fun(x, *args)
    elif isinstance(args, dict):
        def fun(x):
            return old_fun(x, **args)
    elif args is not None:
        def fun(x):
            return old_fun(x, args)
    else:
        def fun(x):
            return old_fun(x)

    x = np.linspace(range_x[0], range_x[1], n)

    # continuous scale
    with suppress(AttributeError):
        x = scales.x.trans.inverse(x)

    # We know these can handle array-likes
    if isinstance(old_fun, (np.ufunc, np.vectorize)):
        y = fun(x)
    else:
        y = [fun(val) for val in x]

    new_data = pd.DataFrame({'x': x, 'y': y})
    return new_data
def prepare_node(self, node, storage_map, compute_map, impl):
    # Postpone the ufunc building to the last minute.
    # NumPy ufuncs support only up to 31 inputs,
    # but our C code supports more.
    if (len(node.inputs) < 32 and
            (self.nfunc is None or
             self.scalar_op.nin != len(node.inputs)) and
            self.ufunc is None and
            impl == 'py'):

        ufunc = numpy.frompyfunc(self.scalar_op.impl,
                                 len(node.inputs),
                                 self.scalar_op.nout)
        if self.scalar_op.nin > 0:
            # We can reuse it for many nodes
            self.ufunc = ufunc
        else:
            node.tag.ufunc = ufunc

    # NumPy ufuncs will sometimes perform operations in
    # float16, in particular when the input is int8.
    # This is not something that we want, and we do not
    # do it in the C code, so we specify that the computation
    # should be carried out in the returned dtype.
    # This is done via the "sig" kwarg of the ufunc; its value
    # should be something like "ff->f", where the characters
    # represent the dtypes of the inputs and outputs.

    # NumPy 1.10.1 raises an error when giving the signature
    # and the input is complex. So add it only when the inputs are int.
    out_dtype = node.outputs[0].dtype
    if (out_dtype in float_dtypes and
            isinstance(self.nfunc, numpy.ufunc) and
            node.inputs[0].dtype in discrete_dtypes):
        char = numpy.sctype2char(out_dtype)
        sig = char * node.nin + '->' + char * node.nout
        node.tag.sig = sig
    node.tag.fake_node = Apply(
        self.scalar_op,
        [get_scalar_type(dtype=input.type.dtype).make_variable()
         for input in node.inputs],
        [get_scalar_type(dtype=output.type.dtype).make_variable()
         for output in node.outputs])
    self.scalar_op.prepare_node(node.tag.fake_node, None, impl)
def perform(self, node, inp, out):
    input, = inp
    output, = out
    axis = self.axis
    if axis is None:
        axis = list(range(input.ndim))
    variable = input
    to_reduce = reversed(sorted(axis))

    if hasattr(self, 'acc_dtype') and self.acc_dtype is not None:
        acc_dtype = self.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype

    if to_reduce:
        for dimension in to_reduce:
            # If it's a zero-size array, use scalar_op.identity
            # if available
            if variable.shape[dimension] == 0:
                if hasattr(self.scalar_op, 'identity'):
                    # Compute the shape of the output
                    v_shape = list(variable.shape)
                    del v_shape[dimension]
                    variable = numpy.empty(tuple(v_shape),
                                           dtype=acc_dtype)
                    variable.fill(self.scalar_op.identity)
                else:
                    raise ValueError((
                        "Input (%s) has zero-size on axis %s, but "
                        "self.scalar_op (%s) has no attribute 'identity'"
                        % (variable, dimension, self.scalar_op)))
            else:
                # Numpy 1.6 has a bug where you sometimes have to specify
                # "dtype='object'" in reduce for it to work, if the ufunc
                # was built with "frompyfunc". We need to find out if we
                # are in one of these cases (only "object" is supported in
                # the output).
                if ((self.ufunc.ntypes == 1) and
                        (self.ufunc.types[0][-1] == 'O')):
                    variable = self.ufunc.reduce(variable, dimension,
                                                 dtype='object')
                else:
                    variable = self.ufunc.reduce(variable, dimension,
                                                 dtype=acc_dtype)

        variable = numpy.asarray(variable)
        if numpy.may_share_memory(variable, input):
            # perhaps numpy is clever for reductions of size 1?
            # We don't want this.
            variable = variable.copy()
        output[0] = theano._asarray(variable,
                                    dtype=node.outputs[0].type.dtype)
    else:
        # Force a copy
        output[0] = numpy.array(variable, copy=True,
                                dtype=node.outputs[0].type.dtype)
AttributeError: 'float' object has no attribute 'log' / TypeError: ufunc 'log' not supported for the input types

How to solve AttributeError: 'float' object has no attribute 'log' / TypeError: ufunc 'log' not supported for the input types

I have a series of fluorescence intensity data in a column ('2.4M'). I tried to create a new column 'ln_2.4M' by taking the natural log of the '2.4M' column, but I get the error:

AttributeError: 'float' object has no attribute 'log'

df["ln_2.4M"] = np.log(df["2.4M"])

I then tried using a for loop to take the log of each fluorescence value in the '2.4M' column:

ln2_4M = []
for x in df["2.4M"]:
    ln2_4M = np.log(x)
    print(ln2_4M)

Although this correctly prints ln2_4M as the log of the '2.4M' column, I cannot use the data because it also raises: TypeError: ufunc 'log' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''

I am not sure why. Any help understanding what is going on and how to fix it is appreciated. Thanks.
Solution

Then I tried the method below, and it worked:

df["2.4M"] = pd.to_numeric(df["2.4M"], errors='coerce')
df["ln_24M"] = np.log(df["2.4M"])
matplotlib error: loop of ufunc does not support argument 0 of type float which has no callable rint method

How to solve the matplotlib error "loop of ufunc does not support argument 0 of type float which has no callable rint method"?
Here is my data series: df =
count
17 83396.142857
18 35970.000000
19 54082.428571
20 21759.714286
21 16899.571429
22 19870.571429
23 32491.285714
24 40425.285714
25 30780.285714
26 11923.428571
27 13698.571429
28 28028.000000
29 52575.000000
First I converted it to int to avoid any issues:

df['count'] = df['count'].astype(int)
df.index = df.index.astype(int)

I am trying to plot the data using:

_, ax = plt.subplots(1, 2)
df.plot.pie(ax=ax[1], y=df['count'])
plt.show()
But it keeps throwing this error:
Type:
TypeError
Message:
loop of ufunc does not support argument 0 of type float which has no callable rint method
Stacktrace:
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/backends/backend_macosx.py",line 61,in _draw
self.figure.draw(renderer)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/artist.py",line 41,in draw_wrapper
return draw(artist,renderer,*args,**kwargs)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/figure.py",line 1863,in draw
mimage._draw_list_compositing_images(
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/image.py",line 131,in _draw_list_compositing_images
a.draw(renderer)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/artist.py",**kwargs)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/cbook/deprecation.py",line 411,in wrapper
return func(*inner_args,**inner_kwargs)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/axes/_base.py",line 2747,in draw
mimage._draw_list_compositing_images(renderer,self,artists)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/image.py",**kwargs)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/axis.py",line 1164,in draw
ticks_to_draw = self._update_ticks()
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/axis.py",line 1022,in _update_ticks
major_labels = self.major.formatter.format_ticks(major_locs)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/ticker.py",line 249,in format_ticks
self.set_locs(values)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/ticker.py",line 782,in set_locs
self._set_format()
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/matplotlib/ticker.py",line 884,in _set_format
if np.abs(locs - np.round(locs,decimals=sigfigs)).max() < thresh:
File "<__array_function__ internals>",line 5,in round_
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/numpy/core/fromnumeric.py",line 3739,in round_
return around(a,decimals=decimals,out=out)
File "<__array_function__ internals>",in around
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/numpy/core/fromnumeric.py",line 3314,in around
return _wrapfunc(a,''round'',out=out)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/numpy/core/fromnumeric.py",line 66,in _wrapfunc
return _wrapit(obj,method,**kwds)
File "/Users/eyshikaagarwal/.virtualenvs/env-hss-ml/lib/python3.8/site-packages/numpy/core/fromnumeric.py",line 43,in _wrapit
result = getattr(asarray(obj),method)(*args,**kwds)
[0m
Any suggestions on what is wrong here? I have already spent hours trying to understand and fix it, with no luck so far. Any help would be great.

Update:

Thanks to @ehsan for the answer for the pie chart, but I still get the same error when I draw a simple line plot using:

plot_kwargs = {'xticks': df.index, 'grid': True, 'color': 'Red', 'title': "Average "}
df.plot(ylabel='Average No. of tracks ', **plot_kwargs)

This is exactly the same error I got with the earlier code, and I do not understand why. I even used y='count' here too, just to see if anything changed, but the error is the same.

Any insight would help. Thanks!
Solution

You want this:

_, ax = plt.subplots(1, 2)
df.plot.pie(ax=ax[1], y='count')
plt.show()

The error is that you used y=df['count'] instead of simply y='count'. You are plotting with pandas, so there is no need to pass the column values, just the column name. Also, you do not need to convert the dtype to int unless you want to.

Output: (pie chart image omitted)
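Regarding the update about the line plot: the thread does not resolve it, but a plausible cause (our assumption, not confirmed by the answerer) is that object-dtype values reach matplotlib's tick formatter, which calls np.round on the tick locations; np.round on an object array must call a .rint() method on each element, and plain Python floats have none. A minimal sketch that reproduces the exact message:

import numpy as np

ticks = np.array([1.5, 2.5, 3.5], dtype=object)  # object dtype, e.g. from a mixed column
try:
    np.round(ticks)  # np.rint has no loop for object arrays
except TypeError as exc:
    print(exc)  # loop of ufunc does not support argument 0 of type float which has no callable rint method

If that is the case here, coercing the index and values to a numeric dtype (pd.to_numeric or .astype(float)) before plotting should clear the error.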
NetCDF Python: Cannot cast ufunc 'multiply' output from dtype('<U32') to dtype('float32')

How to solve: NetCDF Python cannot cast ufunc 'multiply' output from dtype('<U32') to dtype('float32')

I am trying to load a netCDF into a dataframe using either the xarray or netCDF4 library. Usually this would not be a problem, since my netCDFs mostly carry latitude, longitude and data values as float32. I assume my error is because some data types are being passed as float64.

I currently get the same error from both libraries on load, presumably because they both use numpy. I am not doing any math operations, just loading.

numpy.core._exceptions.UFuncTypeError: Cannot cast ufunc 'multiply' output from dtype('<U32')
to dtype('float32') with casting rule 'same_kind'

Using print(netCDF4.Dataset("d:\\netdcdf.nc")) produces the following description:

dimensions(sizes): time(1), lon(841), lat(681)
variables(dimensions): float64 time(time), float64 lon(lon), float64 lat(lat), int32 crs(), float32 deadpool(time, lat, lon)

My script is below, including loading examples for both xarray and netCDF4.
# This file is designed to convert netcdf files to the BOM standard format.
import netCDF4
import pandas as pd
import xarray as xr

def main():
    pass

if __name__ == '__main__':
    inputfile = 'D:\\Temp\\WeatherDownloads\\Weather\\deadpool.aus.nc'

    # xarray setup, debug and load
    ncx = xr.open_dataset(inputfile)
    ncdf = ncx.deadpool.to_dataframe()  # fails here if we use xarray
    print(ncdf.head(10))

    # netCDF4 setup, debug and load
    nc = netCDF4.Dataset(inputfile, mode='r')
    nc.variables.keys()
    lat = nc.variables['lat'][:]
    lon = nc.variables['lon'][:]
    time = nc.variables['time']
    datavar = nc.variables['deadpool'][:]  # fails here if we use netCDF4
    # note: dtype() below is not defined anywhere in the asker's script
    print("The dtype of lat is: " + str(dtype(lat)))
    print("The dtype of lon is: " + str(dtype(lon)))
    print("The dtype of time is: " + str(dtype(time)))
    print("The dtype of datavar is: " + str(dtype(datavar)))
    data_ts = pd.Series(datavar, index=time)
    print(data_ts.head(10))
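No answer is recorded in this thread. One common cause of this exact error (offered here as an assumption, not as the asker's confirmed fix) is a packing attribute such as scale_factor or add_offset stored as a string in the file: on load, the library applies data * scale_factor, and multiplying by a 32-character string yields the dtype('<U32') in the message. A diagnostic and workaround sketch:

import netCDF4

nc = netCDF4.Dataset('deadpool.aus.nc', mode='r')  # path assumed
var = nc.variables['deadpool']

# Diagnostic: packing attributes should be numbers, not strings
for attr in ('scale_factor', 'add_offset'):
    if hasattr(var, attr):
        print(attr, repr(getattr(var, attr)))  # a str here would explain dtype('<U32')

# Workaround: disable automatic scaling and apply it manually as floats
var.set_auto_maskandscale(False)
raw = var[:]
scale = float(getattr(var, 'scale_factor', 1.0))
offset = float(getattr(var, 'add_offset', 0.0))
data = raw * scale + offset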

Is numpy ufunc.reduce slower than applying native Python reduce after ndarray.tolist()?

How to solve: is numpy ufunc.reduce slower than applying native Python reduce after ndarray.tolist()?

When playing with Python lists, I like to use a lot of functional-programming features. When I switched to NumPy for large data sets, I expected it to be more efficient than native Python list operations over ndarray.tolist(), since it is stored differently.

So when I tried to apply FP things like map, reduce and filter to a NumPy array, I first searched NumPy's documentation for some "optimized things". What I found was numpy.ufunc.reduce, which seemed to be the right thing. But, out of curiosity, I ran a simple test of both approaches:
- Using NumPy's reduce:

import numpy as np

a = np.array(range(100000000))
adf = lambda res, a: res + a
u_adf = np.frompyfunc(adf, 2, 1)
print(u_adf.reduce(a, initial=0))
- Using ndarray.tolist() and then native Python reduce:

import numpy as np
from functools import reduce

a = np.array(range(100000000))
adf = lambda res, a: res + a
print(reduce(adf, a.tolist(), 0))
Here comes the most unexpected part:
> python 1.py
4999999950000000
python 1.py 28.00s user 5.71s system 102% cpu 32.925 total
> python 2.py
4999999950000000
python 2.py 26.38s user 6.38s system 103% cpu 31.792 total
So the supposedly "dumb" approach is actually the more efficient one?
How can that be? Can anyone explain this for me, and hopefully offer some advice on using functional-programming features with NumPy arrays?
Much appreciated ^_^
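No answer is recorded in this thread either, but the timing itself is explainable (stated here as background, not as the thread's accepted answer): numpy.frompyfunc does not compile anything; the resulting object-dtype ufunc still calls the Python lambda once per element, so u_adf.reduce does the same amount of Python-level work as functools.reduce, plus per-element boxing overhead. The genuinely optimized path is a compiled ufunc reduction:

import numpy as np

a = np.arange(100000000)

print(np.add.reduce(a))  # compiled C loop over the int64 buffer
print(a.sum())           # the idiomatic spelling of the same reduction

Both print 4999999950000000 and run orders of magnitude faster than either script above, because no Python function is called per element.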
That concludes our introduction to the Python numpy module's ufunc() example source code. Thank you for your patient reading. For more about the AttributeError: 'float' object has no attribute 'log' / TypeError: ufunc 'log' errors, the matplotlib "rint" error, the NetCDF ufunc 'multiply' casting error, and the ufunc.reduce performance question, please search this site.