在本文中,您将会了解到关于Pythonnumpy模块-nanmin()实例源码的新资讯,同时我们还将为您解释python中numpy模块的相关在本文中,我们将带你探索Pythonnumpy模块-nan
在本文中,您将会了解到关于Python numpy 模块-nanmin() 实例源码的新资讯,同时我们还将为您解释python中numpy模块的相关在本文中,我们将带你探索Python numpy 模块-nanmin() 实例源码的奥秘,分析python中numpy模块的特点,并给出一些关于Jupyter 中的 Numpy 在打印时出错(Python 版本 3.8.8):TypeError: 'numpy.ndarray' object is not callable、numpy.random.random & numpy.ndarray.astype & numpy.arange、numpy.ravel()/numpy.flatten()/numpy.squeeze()、Numpy:数组创建 numpy.arrray() , numpy.arange()、np.linspace ()、数组基本属性的实用技巧。
本文目录一览:- Python numpy 模块-nanmin() 实例源码(python中numpy模块)
- Jupyter 中的 Numpy 在打印时出错(Python 版本 3.8.8):TypeError: 'numpy.ndarray' object is not callable
- numpy.random.random & numpy.ndarray.astype & numpy.arange
- numpy.ravel()/numpy.flatten()/numpy.squeeze()
- Numpy:数组创建 numpy.arrray() , numpy.arange()、np.linspace ()、数组基本属性
Python numpy 模块-nanmin() 实例源码(python中numpy模块)
Python numpy 模块,nanmin() 实例源码
我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用numpy.nanmin()。
def normalize_array(solution, prediction):
    """Use min and max of `solution` as scaling factors to normalize `prediction`,
    then threshold it to [0, 1]; binarize `solution` to {0, 1}.

    This allows applying classification scores to all cases. In principle,
    this should not do anything to properly formatted classification inputs
    and outputs.

    :param solution: ground-truth array; thresholded at its mid-range
    :param prediction: score array; min-max scaled then clipped to [0, 1]
    :return: [new_solution, new_prediction], or the originals unchanged when
             the solution has zero range (a warning is printed)
    """
    sol = np.ravel(solution)  # convert to 1-d array
    # Extremes over finite values only. (The original py2-style
    # np.nanmax(filter(...)) raises TypeError on Python 3 because nanmax
    # cannot consume a filter object; masking with isfinite is equivalent.)
    finite_sol = sol[np.isfinite(sol)]
    maxi = np.nanmax(finite_sol)  # max except NaN and Inf
    mini = np.nanmin(finite_sol)  # min except NaN and Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini) / 2.
    # Binarize solution around its mid-range value
    new_solution = np.copy(solution)
    new_solution[solution >= mid] = 1
    new_solution[solution < mid] = 0
    # Normalize and threshold predictions (takes effect only if solution
    # is not already in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini)) / float(diff)
    new_prediction[new_prediction > 1] = 1  # if predictions exceed [0, 1]
    new_prediction[new_prediction < 0] = 0
    return [new_solution, new_prediction]
- def normalize_array (solution, new_prediction]
- def normalize_array (solution, new_prediction]
def sanitize_array(array):
    """Replace NaN and Inf entries in `array` (there should not be any!).

    +inf is replaced by the finite maximum, -inf by the finite minimum, and
    NaN by the mid-range value. The array is modified in place and returned.

    :param array: numpy array to sanitize (modified in place)
    :return: the sanitized array
    """
    a = np.ravel(array)
    # Extremes computed over finite values only (ignores NaN and +/-inf)
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def min_max(self, mask=None):
    """Get the minimum and maximum value in this data.

    If a mask is provided we get the min and max value within the given mask.
    NaNs are ignored (np.nanmin/np.nanmax skip NaN; note that infinities are
    NOT filtered out here).

    Args:
        mask (ndarray): the mask; we only include elements for which mask > 0

    Returns:
        tuple: (min, max) the minimum and maximum values
    """
    if mask is not None:
        # Restrict the scan to the region of interest defined by the mask
        roi = mdt.create_roi(self.data, mask)
        return np.nanmin(roi), np.nanmax(roi)
    return np.nanmin(self.data), np.nanmax(self.data)
- def test_extrema():
- for nprocs in [1, 2, 4, 8]:
- ds = fake_random_ds(16, nprocs = nprocs, fields = ("density",
- "veLocity_x", "veLocity_y", "veLocity_z"))
- for sp in [ds.sphere("c", (0.25, ''unitary'')), ds.r[0.5,:,:]]:
- mi, ma = sp.quantities["Extrema"]("density")
- assert_equal(mi, np.nanmin(sp["density"]))
- assert_equal(ma, np.nanmax(sp["density"]))
- dd = ds.all_data()
- mi, ma = dd.quantities["Extrema"]("density")
- assert_equal(mi, np.nanmin(dd["density"]))
- assert_equal(ma, np.nanmax(dd["density"]))
- sp = ds.sphere("max", ''unitary''))
- assert_equal(np.any(np.isnan(sp["radial_veLocity"])), False)
- mi, ma = dd.quantities["Extrema"]("radial_veLocity")
- assert_equal(mi, np.nanmin(dd["radial_veLocity"]))
- assert_equal(ma, np.nanmax(dd["radial_veLocity"]))
def local_entropy(ocl_ctx, img, window_radius, num_bins=8):
    """Compute the local entropy of `img` using a sliding window, via OpenCL.

    :param ocl_ctx: OpenCL context used to build and run the kernel
    :param img: 2-d image array (converted to float32 for the device)
    :param window_radius: radius of the sliding window, in pixels
    :param num_bins: number of histogram bins used by the entropy kernel
    :return: float32 array of per-pixel entropy, same shape as `img`
    """
    mf = cl.mem_flags
    cl_queue = cl.CommandQueue(ocl_ctx)
    img_np = np.array(img).astype(np.float32)
    img_buf = cl.Buffer(ocl_ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=img_np)
    # The kernel bins intensities over [min_val, max_val]
    min_val = np.nanmin(img)
    max_val = np.nanmax(img)
    entropy = np.zeros_like(img, dtype=np.float32)
    dest_buf = cl.Buffer(ocl_ctx, mf.WRITE_ONLY, entropy.nbytes)
    # Kernel source lives next to this module under cl/
    cl_dir = os.path.dirname(__file__)
    cl_filename = os.path.join(cl_dir, 'cl', 'local_entropy.cl')
    with open(cl_filename, 'r') as fd:
        clstr = fd.read()
    prg = cl.Program(ocl_ctx, clstr).build()
    prg.local_entropy(cl_queue, entropy.shape, None,
                      img_buf, dest_buf,
                      np.int32(img.shape[1]), np.int32(img.shape[0]),
                      np.int32(window_radius), np.int32(num_bins),
                      np.float32(min_val), np.float32(max_val))
    cl.enqueue_copy(cl_queue, entropy, dest_buf)
    cl_queue.finish()
    return entropy
def minmax(X):
    """Return the MinMax semivariance of sample X: (nanmax - nanmin) / nanmean.

    X has to be an even-length array of point pairs like:
    x1, x1+h, x2, x2+h, ..., xn, xn+h.
    Nested lists/arrays are processed recursively, returning a list.

    :param X: even-length 1-d sample, or a sequence of such samples
    :return: MinMax semivariance (float), or a list thereof for nested input
    :raises ValueError: if the (flat) sample length is odd
    """
    _X = np.asarray(X)
    # Recurse into nested samples (list of lists / 2-d array)
    if any(isinstance(_, (list, np.ndarray)) for _ in _X):
        return [minmax(_) for _ in _X]
    # The pair structure requires an even number of points
    if len(_X) % 2 > 0:
        raise ValueError('The sample does not have an even length: {}'.format(_X))
    return (np.nanmax(_X) - np.nanmin(_X)) / np.nanmean(_X)
- def test_FmtHeatmap__get_min_max_from_selected_cell_values_with_cache():
- df_pn = df - 5.
- cache = {}
- fmt = pbtf.FmtHeatmap(cache=cache)
- res = fmt._get_min_max_from_selected_cell_values(None, df_pn)
- assert len(cache) == 1 and (None, None) in cache.keys()
- assert res == (np.nanmin(df_pn), np.nanmax(df_pn))
- min_value, max_value = np.nanmin(df.loc[[''a''], [''aa'', ''bb'']]), np.nanmax(df.loc[[''a''], ''bb'']])
- res = fmt._get_min_max_from_selected_cell_values([''a''], ''bb''], df)
- assert len(cache) == 2 and (frozenset([''a'']), frozenset([''aa'', ''bb''])) in cache.keys()
- assert res == (min_value, max_value)
- res = fmt._get_min_max_from_selected_cell_values([''a''], max_value)
- def test_FmtHeatmap__get_min_max_from_selected_cell_values_without_cache():
- df_pn = df - 5.
- cache = None
- fmt = pbtf.FmtHeatmap(cache=cache)
- res = fmt._get_min_max_from_selected_cell_values(None, df_pn)
- assert cache is None
- assert res == (np.nanmin(df_pn), df)
- assert cache is None
- assert res == (min_value, max_value)
def depth_callback(self, data):
    """ROS subscriber callback: convert a depth image message and display it.

    NaN pixels are replaced by the minimum depth, then the image is linearly
    scaled to [0, 255] for visualization with OpenCV.
    """
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)
    depth_img = self.depth_image.copy()
    # Fill NaN holes with the minimum so the scaling below stays finite
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)
- def depth_callback(self,stream)
- def depth_callback(self, depth_img)
- cv2.waitKey(5)
def basemap_raster_mercator(lon, lat, grid, cmin, cmax, cmap_name):
    """Render `grid` over a spherical-mercator basemap and return PNG bytes.

    :param lon, lat: 2-d coordinate arrays of the grid
    :param grid: data values to render (NaNs are masked out)
    :param cmin, cmax: color scale limits
    :param cmap_name: matplotlib colormap name
    :return: (png_bytes, corner_coords) where corner_coords lists (lon, lat)
             corners of the rendered extent as plain floats
    """
    # longitude/latitude extent
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))
    # construct spherical mercator projection for the region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])
    masked_grid = np.ma.array(grid, mask=np.isnan(grid))
    fig = plt.figure(frameon=False, figsize=(12, 8), dpi=72)
    plt.axis('off')
    cmap = mpl.cm.get_cmap(cmap_name)
    m.pcolormesh(lon, lat, masked_grid, latlon=True, cmap=cmap, vmin=cmin, vmax=cmax)
    str_io = StringIO.StringIO()
    # 'bbox_inches' (the scraped source had a corrupted 'bBox_inches', which
    # savefig would reject as an unknown keyword)
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    plt.close()
    # NOTE(review): only three corners are listed here; a fourth
    # (lons[1], lats[0]) may have been lost in scraping -- confirm against
    # the caller's expectation before changing.
    numpy_bounds = [(lons[0], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    float_bounds = [(float(x), float(y)) for x, y in numpy_bounds]
    return str_io.getvalue(), float_bounds
- def basemap_barbs_mercator(u,v,lon):
- # lon/lat extents
- lons = (np.amin(lon),np.nanmax(grid)
- fig = plt.figure(frameon=False,dpi=72*4)
- plt.axis(''off'')
- m.quiver(lon,u,latlon=True)
- str_io = StringIO.StringIO()
- plt.savefig(str_io, float_bounds
- def setSymColormap(self):
- cmap = {''ticks'':
- [[0, (106, 0, 31, 255)],
- [.5, (255, 255,
- [1., (8, 54, 104, 255)]],
- ''mode'': ''rgb''}
- cmap = {''ticks'':
- [[0, (172, 56, 56)], (51, 53, 120)]],
- ''mode'': ''rgb''}
- lvl_min = lvl_max = 0
- for plot in self.plots:
- plt_min = num.nanmin(plot.data)
- plt_max = num.nanmax(plot.data)
- lvl_max = lvl_max if plt_max < lvl_max else plt_max
- lvl_min = lvl_min if plt_min > lvl_min else plt_min
- abs_range = max(abs(lvl_min), abs(lvl_max))
- self.gradient.restoreState(cmap)
- self.setLevels(-abs_range, abs_range)
- def setSymColormap(self):
- cmap = {''ticks'':
- [[0., (0,
- [1e-3,
- ''mode'': ''rgb''}
- cmap = {''ticks'':
- [[0., 0)],
- ''mode'': ''rgb''}
- lvl_min = num.nanmin(self._plot.data)
- lvl_max = num.nanmax(self._plot.data)
- abs_range = max(abs(lvl_min), abs_range)
def setArray(self, incomingArray, copy=False):
    """Set self.array from `incomingArray`, masking invalid (NaN/inf) values.

    You can use self.array directly, but if you want to copy from one array
    into a raster we suggest you do it this way.

    Also refreshes the cached rows/cols/min/max attributes.

    :param incomingArray: source ndarray or masked array
    :param copy: when True, store a copy instead of a reference
    """
    masked = isinstance(self.array, np.ma.MaskedArray)
    if copy:
        if masked:
            self.array = np.ma.copy(incomingArray)
        else:
            self.array = np.ma.masked_invalid(incomingArray, copy=True)
    else:
        if masked:
            self.array = incomingArray
        else:
            self.array = np.ma.masked_invalid(incomingArray)
    # Cache basic stats for cheap later access
    self.rows = self.array.shape[0]
    self.cols = self.array.shape[1]
    self.min = np.nanmin(self.array)
    self.max = np.nanmax(self.array)
def _choose_cov(self, cov_type, **cov_config):
    """Return the covariance estimator for `cov_type`, reformatting clusters.

    For 'clustered' covariance the cluster variables are validated (each must
    be constant within an entity) and collapsed to one row per entity.

    :param cov_type: key into self._cov_estimators
    :param cov_config: estimator options; 'clusters' is reformatted if present
    :return: (estimator, updated_config)
    :raises ValueError: if a cluster variable varies within an entity
    """
    cov_est = self._cov_estimators[cov_type]
    if cov_type != 'clustered':
        return cov_est, cov_config
    cov_config_upd = dict(cov_config)
    clusters = cov_config.get('clusters', None)
    if clusters is not None:
        clusters = self.reformat_clusters(clusters).copy()
        # nanmax == nanmin along the time axis <=> cluster id constant per entity
        cluster_max = np.nanmax(clusters.values3d, axis=1)
        delta = cluster_max - np.nanmin(clusters.values3d, axis=1)
        if np.any(delta != 0):
            raise ValueError('clusters must not vary within an entity')
        index = clusters.panel.minor_axis
        reindex = clusters.entities
        clusters = pd.DataFrame(cluster_max.T, index=index, columns=clusters.vars)
        clusters = clusters.loc[reindex].astype(np.int64)
        cov_config_upd['clusters'] = clusters
    return cov_est, cov_config_upd
- def get_bBox(self):
- """
- Returns boundary Box for the coordinates. Useful for setting up
- the map extent for plotting on a map.
- :return tuple: corner coordinates (llcrnrlat,urcrnrlat,llcrnrlon,
- urcrnrlon)
- """
- x, y, z = zip(self)
- llcrnrlat = np.nanmin(y)
- urcrnrlat = np.nanmax(y)
- llcrnrlon = np.nanmin(x)
- urcrnrlon = np.nanmax(x)
- return (llcrnrlat,
- urcrnrlat,
- llcrnrlon,
- urcrnrlon)
def visRenderedViews(self, outDir, nViews=0):
    """Display rendered RGB images and their depth maps from `outDir`.

    :param outDir: directory containing render_*.png and matching depth_* files
    :param nViews: if > 0, limit the display to the first nViews renders
    """
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    renders = sorted(glob.glob(outDir + '/render_*.png'))
    if (nViews > 0) and (nViews < len(renders)):
        renders = renders[:nViews]
    for render in renders:
        print(render)
        rgbIm = scipy.misc.imread(render)
        dMap = loadDepth(render.replace('render_', 'depth_'))
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
        plt.imshow(rgbIm)
        # Clip far-plane depths so the displayed color range stays informative
        dMap[dMap >= 10] = np.nan
        plt.subplot(122)
        plt.imshow(dMap)
        print(np.nanmax(dMap), np.nanmin(dMap))
        plt.show()
def find_bBox(t):
    """Given a table `t`, find the bounding box of the region ellipses.

    Each row needs 'Maj', 'Min', 'PA' (degrees), 'RA' and 'DEC' columns;
    `scale` is a module-level unit-conversion factor.

    :param t: iterable of records describing elliptical regions
    :return: (ra, dec, size) -- box center coordinates and size in arcsec
    """
    boxes = []
    for r in t:
        a = r['Maj'] / scale
        b = r['Min'] / scale
        th = (r['PA'] + 90) * np.pi / 180.0
        # Axis-aligned half-extents of a rotated ellipse
        dx = np.sqrt((a * np.cos(th)) ** 2.0 + (b * np.sin(th)) ** 2.0)
        dy = np.sqrt((a * np.sin(th)) ** 2.0 + (b * np.cos(th)) ** 2.0)
        # RA extent corrected for the convergence of meridians at this DEC
        boxes.append([r['RA'] - dx / np.cos(r['DEC'] * np.pi / 180.0),
                      r['RA'] + dx / np.cos(r['DEC'] * np.pi / 180.0),
                      r['DEC'] - dy, r['DEC'] + dy])
    boxes = np.array(boxes)
    minra = np.nanmin(boxes[:, 0])
    maxra = np.nanmax(boxes[:, 1])
    mindec = np.nanmin(boxes[:, 2])
    maxdec = np.nanmax(boxes[:, 3])
    ra = np.mean((minra, maxra))
    dec = np.mean((mindec, maxdec))
    # 20% margin; degrees -> arcsec
    size = 1.2 * 3600.0 * np.max((maxdec - mindec, (maxra - minra) * np.cos(dec * np.pi / 180.0)))
    return ra, dec, size
def VshGR(GRlog, itmin, itmax):
    """Compute shale volume (Vsh, percent) from a gamma-ray (GR) log.

    GRmin/GRmax are taken as the means of the samples within itmin/itmax
    percent of the log's extremes, then Vsh is the linear index clipped to
    [0, 100].

    :param GRlog: 1-d array of gamma-ray readings
    :param itmin: tolerance (percent) above the minimum defining the clean zone
    :param itmax: tolerance (percent) below the maximum defining the shale zone
    :return: array of shale volume percentages in [0, 100]
    """
    GRmin = np.nanmin(GRlog)
    # Use 100.0 so integer tolerances do not truncate to zero under py2-style
    # integer division
    GRminInt = GRlog[GRlog <= GRmin * (1 + itmin / 100.0)]  # samples near GRmin
    GRminm = np.mean(GRminInt)  # mean of the GRmin values
    GRmax = np.nanmax(GRlog)
    GRmaxInt = GRlog[GRlog >= GRmax * (1 - itmax / 100.0)]  # samples near GRmax
    GRmaxm = np.mean(GRmaxInt)  # mean of the GRmax values
    Vsh = 100 * (GRlog - GRminm) / (GRmaxm - GRminm)  # clay volume
    # Clip to the physical range [0, 100]
    Vsh = np.clip(Vsh, 0, 100)
    print(GRmin, GRminm, GRmax, GRmaxm, np.nanmin(Vsh), np.nanmax(Vsh))
    return Vsh
- def VshGR(GRlog, np.nanmax(Vsh)
- return Vsh
- def distance_curves(x, ys, q1):
- """
- distances to the curves.
- :param x: x values of curves (they have to be sorted).
- :param ys: y values of multiple curves sharing x values.
- :param q1: a point to measure distance to.
- :return:
- """
- # convert curves into a series of startpoints and endpoints
- xp = rolling_window(x, 2)
- ysp = rolling_window(ys, 2)
- r = np.nanmin(distance_line_segment(xp[:, 0], ysp[:, :,
- xp[:, 1],
- q1[0], q1[1]), axis=1)
- return r
- def set_marker_size(self, attr, update=True):
- try:
- self._size_attr = variable = self.data.domain[attr]
- if len(self.data) == 0:
- raise Exception
- except Exception:
- self._size_attr = None
- self._legend_sizes = []
- else:
- assert variable.is_continuous
- self._raw_sizes = values = self.data.get_column_view(variable)[0].astype(float)
- # Note,[5,60] is also hardcoded in legend-size-indicator.svg
- self._sizes = scale(values, 5, 60).astype(np.uint8)
- min = np.nanmin(values)
- self._legend_sizes = self._legend_values(variable,
- [min, np.nanmax(values)]) if not np.isnan(min) else []
- finally:
- if update:
- self.redraw_markers_overlay_image(new_image=True)
def sanitize_array(array):
    """Replace NaN and Inf entries (there should not be any!) in place.

    +inf -> finite max, -inf -> finite min, NaN -> mid-range. Returns `array`.
    """
    a = np.ravel(array)
    # Extremes over finite values only. The original py2-style
    # np.nanmax(filter(...)) raises TypeError on Python 3.
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
- def frame_to_series(self, field, frame, columns=None):
- """
- Convert a frame with a DatetimeIndex and sid columns into a series with
- a sid index,using the aggregator defined by the given field.
- """
- if isinstance(frame, pd.DataFrame):
- columns = frame.columns
- frame = frame.values
- if not len(frame):
- return pd.Series(
- data=(0 if field == ''volume'' else np.nan),
- index=columns,
- ).values
- if field in [''price'', ''close'']:
- # shortcircuit for full last row
- vals = frame[-1]
- if np.all(~np.isnan(vals)):
- return vals
- return ffill(frame)[-1]
- elif field == ''open'':
- return bfill(frame)[0]
- elif field == ''volume'':
- return np.nansum(frame, axis=0)
- elif field == ''high'':
- return np.nanmax(frame, axis=0)
- elif field == ''low'':
- return np.nanmin(frame, axis=0)
- else:
- raise ValueError("UnkNown field {}".format(field))
def extract_img_background(img_array,
                           custom_limits=None,
                           median_diffbelow=200.0,
                           image_min=None):
    """Extract the background level of the provided image array.

    Masks the array to values between a lower and an upper flux bound, then
    returns the median of the surviving pixels.

    :param img_array: image to find the background for
    :param custom_limits: optional (backmin, backmax) overriding the defaults
    :param median_diffbelow: subtracted from the image median to get the upper
        bound for background extraction (ignored with custom_limits)
    :param image_min: lower bound for background extraction; defaults to the
        NaN-ignoring minimum of the image
    :return: scalar median of the masked background pixels
    """
    if not custom_limits:
        backmax = np.median(img_array) - median_diffbelow
        backmin = image_min if image_min is not None else np.nanmin(img_array)
    else:
        backmin, backmax = custom_limits
    masked = npma.masked_outside(img_array, backmin, backmax)
    backmasked = npma.median(masked)
    return backmasked
- ## IMAGE SECTION FUNCTIONS ##
def quickMinMax(self, data):
    """Estimate the (min, max) values of `data` by subsampling.

    The largest axis is repeatedly halved (stride-2 slicing) until at most
    1e6 elements remain, then NaN-ignoring extrema are returned.
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        # Stride-2 subsample along the largest axis. NOTE(review): the
        # scraped source read slice(None, 2) (first two elements), which
        # looks like a truncation of slice(None, None, 2) -- confirm.
        sl[ax] = slice(None, None, 2)
        # Index with a tuple: indexing with a list of slices is invalid on
        # modern numpy.
        data = data[tuple(sl)]
    return np.nanmin(data), np.nanmax(data)
- def dataBounds(self, ax, frac=1.0, orthoRange=None):
- if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None:
- return self.bounds[ax]
- #self.prepareGeometryChange()
- if self.data is None or len(self.data) == 0:
- return (None, None)
- if ax == 0:
- d = self.data[''x'']
- d2 = self.data[''y'']
- elif ax == 1:
- d = self.data[''y'']
- d2 = self.data[''x'']
- if orthoRange is not None:
- mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])
- d = d[mask]
- d2 = d2[mask]
- if frac >= 1.0:
- self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth*0.7072, np.nanmax(d) + self._maxSpotWidth*0.7072)
- return self.bounds[ax]
- elif frac <= 0.0:
- raise Exception("Value for parameter ''frac'' must be > 0. (got %s)" % str(frac))
- else:
- mask = np.isfinite(d)
- d = d[mask]
- return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])
- def quickMinMax(self, nanmax(data)
def normalize_data(self, values):
    """Min-max normalize the first five columns of `values`.

    :param values: sequence of rows; columns 0-4 must be numeric
    :return: (normalized_values, data_min, data_max) where the normalized
             deep copy has columns 0-4 scaled to [0, 1] per column
    """
    normalized_values = copy.deepcopy(values)
    data = np.array(values, dtype=float)[:, 0:5]
    data_min = np.nanmin(data, 0)  # per-column minima
    data_max = np.nanmax(data, 0)  # per-column maxima
    print(data_min)
    print(data_max)
    for i in range(len(values)):
        for j in range(5):
            normalized_values[i][j] = np.abs(values[i][j] - data_min[j]) / np.abs(data_max[j] - data_min[j])
    return normalized_values, data_min, data_max
- def writeBinData(out_file, i, GenotypeData, scoreList, NumInfoSites):
- num_lines = len(GenotypeData.accessions)
- (likeliscore, likeliHoodratio) = snpmatch.calculate_likelihoods(scoreList, NumInfoSites)
- if len(likeliscore) > 0:
- NumAmb = np.where(likeliHoodratio < snpmatch.lr_thres)[0]
- if len(NumAmb) >= 1 and len(NumAmb) < num_lines:
- try:
- nextLikeli = np.nanmin(likeliHoodratio[np.where(likeliHoodratio > snpmatch.lr_thres)[0]])
- except:
- nextLikeli = 1
- for k in NumAmb:
- score = float(scoreList[k])/NumInfoSites[k]
- out_file.write("%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n" % (GenotypeData.accessions[k], int(scoreList[k]), NumInfoSites[k], score, likeliscore[k], nextLikeli, len(NumAmb), i+1))
def image_as_uint8(im):
    """Convert the given image to uint8.

    If the dtype is already uint8, it is returned as-is. If the image is
    float and all values are between 0 and 1, the values are multiplied by
    255. In all other situations, the values are scaled such that the
    minimum value becomes 0 and the maximum value becomes 255.

    :param im: numpy array image
    :return: uint8 array
    :raises ValueError: if `im` is not a numpy array
    """
    if not isinstance(im, np.ndarray):
        raise ValueError('image must be a numpy array')
    dtype_str = str(im.dtype)
    # Already uint8?
    if dtype_str == 'uint8':
        return im
    mi, ma = np.nanmin(im), np.nanmax(im)
    # Float images in [0, 1] get the fixed 0-1 -> 0-255 mapping
    if dtype_str.startswith('float'):
        if mi >= 0 and ma <= 1:
            mi, ma = 0, 1
    # Now make a float copy before we scale
    im = im.astype('float32')
    # Scale the values between 0 and 255
    if np.isfinite(mi) and np.isfinite(ma):
        if mi:
            im -= mi
        if ma != 255:
            im *= 255.0 / (ma - mi)
        assert np.nanmax(im) < 256
    return im.astype(np.uint8)
- # currently not used ... the only use it to easly provide the global Meta info
- def test_masked(self):
- mat = np.ma.fix_invalid(_ndat)
- msk = mat._mask.copy()
- for f in [np.nanmin]:
- res = f(mat, axis=1)
- tgt = f(_ndat, axis=1)
- assert_equal(res, tgt)
- assert_equal(mat._mask, msk)
- assert_(not np.isinf(mat).any())
- def test_nanmin(self):
- tgt = np.min(self.mat)
- for mat in self.integer_arrays():
- assert_equal(np.nanmin(mat), tgt)
def data(self, data):
    """Set the underlying data grid (a :class:`numpy.ndarray`).

    Infinities are zeroed out in place, then the cached min/max values and
    index lists are refreshed and the dirty flag cleared.
    """
    self._assert_shape(data, self._x_indexes, self._y_indexes)
    # Replace +/-inf so the min/max below reflect real data only
    data[data == -np.inf] = 0.0
    data[data == np.inf] = 0.0
    self._data = data
    self._min_value = np.nanmin(self.data)
    self._max_value = np.nanmax(self.data)
    self._data_x_indexes = list(range(data.shape[0]))
    self._data_y_indexes = list(range(data.shape[1]))
    self._dirty = False
def sanitize_array(array):
    """Replace NaN and Inf entries (there should not be any!) in place.

    +inf -> finite max, -inf -> finite min, NaN -> mid-range. Returns `array`.
    The scraped original lost the line computing `mini`, which made the
    function raise NameError; it is restored here alongside the py3-safe
    finite-mask form of the extrema.
    """
    a = np.ravel(array)
    maxi = np.nanmax(a[np.isfinite(a)])  # max except NaN and Inf
    mini = np.nanmin(a[np.isfinite(a)])  # min except NaN and Inf
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def sanitize_array(array):
    """Replace NaN and Inf entries (there should not be any!) in place.

    +inf -> finite max, -inf -> finite min, NaN -> mid-range. Returns `array`.
    The scraped original lost the line computing `mini`, which made the
    function raise NameError; it is restored here alongside the py3-safe
    finite-mask form of the extrema.
    """
    a = np.ravel(array)
    maxi = np.nanmax(a[np.isfinite(a)])  # max except NaN and Inf
    mini = np.nanmin(a[np.isfinite(a)])  # min except NaN and Inf
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def _evaluate(self, x):
    """Return the lower envelope min_f f(x) over self.functions.

    Only called internally by HARKinterpolator1D.__call__.

    :param x: scalar or 1-d array of query points
    :return: scalar or array of pointwise minima
    """
    if _isscalar(x):
        y = np.nanmin([f(x) for f in self.functions])
    else:
        m = len(x)
        # Evaluate every function at every point, then take the row-wise min
        fx = np.zeros((m, self.funcCount))
        for j in range(self.funcCount):
            fx[:, j] = self.functions[j](x)
        y = np.nanmin(fx, axis=1)
    return y
def _evaluate(self, x, y):
    """Return the lower envelope min_f f(x, y) over self.functions.

    Only called internally by HARKinterpolator2D.__call__.

    :param x, y: scalars or equal-length 1-d arrays of query points
    :return: scalar or array of pointwise minima
    """
    if _isscalar(x):
        f = np.nanmin([f(x, y) for f in self.functions])
    else:
        m = len(x)
        # Evaluate every function at every point, then take the row-wise min
        temp = np.zeros((m, self.funcCount))
        for j in range(self.funcCount):
            temp[:, j] = self.functions[j](x, y)
        f = np.nanmin(temp, axis=1)
    return f
- def _evaluate(self,y,z):
- ''''''
- Returns the level of the function at each value in (x,y,z) as the minimum
- among all of the functions. Only called internally by
- HARKinterpolator3D.__call__.
- ''''''
- if _isscalar(x):
- f = np.nanmin([f(x,z) for f in self.functions])
- else:
- m = len(x)
- temp = np.zeros((m,z)
- f = np.nanmin(temp,axis=1)
- return f
- def replot(self, val):
- ''''''
- ''''''
- # Update plot
- self.cadence = int(val)
- self.implot.set_data(self.images[int(val)])
- self.implot.set_clim(vmin = np.nanmin(self.images[int(val)]), vmax = np.nanmax(self.images[int(val)]))
- self.tracker1.set_xdata([self.time[self.cadence], self.time[self.cadence]])
- self.tracker2.set_xdata([self.time[self.cadence], self.time[self.cadence]])
- self.update_bkg()
- self.update_lc()
- self.update_lcbkg()
- self.fig.canvas.draw()
def vmin(self):
    """Lower color-scale bound: the explicit value if set, else the matrix minimum.

    Uses an explicit None check so a deliberate vmin of 0 (which is falsy)
    is honored instead of silently falling back to np.nanmin.
    """
    return self._vmin if self._vmin is not None else np.nanmin(self.hic_matrix)
- def _plot(self, region=None, cax=None):
- da_sub, regions_sub = sub_data_regions(self.da, self.regions, region)
- da_sub_masked = np.ma.MaskedArray(da_sub, mask=np.isnan(da_sub))
- bin_coords = np.r_[[(x.start - 1) for x in regions_sub], regions_sub[-1].end]
- x, y = np.meshgrid(bin_coords, self.window_sizes)
- self.mesh = self.ax.pcolormesh(x, da_sub_masked, cmap=self.colormap, vmax=self.vmax)
- self.colorbar = plt.colorbar(self.mesh, cax=cax, orientation="vertical")
- self.window_size_line = self.ax.axhline(self.current_window_size, color=''red'')
- if self.log_y:
- self.ax.set_yscale("log")
- self.ax.set_ylim((np.nanmin(self.window_sizes), np.nanmax(self.window_sizes)))
- def _plot(self, cax=None):
- self._new_region(region)
- bin_coords = [(x.start - 1) for x in self.sr]
- ds = self.da_sub[self.init_row]
- self.line, = self.ax.plot(bin_coords, ds)
- if not self.is_symmetric:
- self.current_cutoff = (self.ax.get_ylim()[1] - self.ax.get_ylim()[0]) / 2 + self.ax.get_ylim()[0]
- else:
- self.current_cutoff = self.ax.get_ylim()[1]/ 2
- self.ax.axhline(0.0, linestyle=''dashed'', color=''grey'')
- self.cutoff_line = self.ax.axhline(self.current_cutoff, color=''r'')
- if self.is_symmetric:
- self.cutoff_line_mirror = self.ax.axhline(-1*self.current_cutoff, color=''r'')
- self.ax.set_ylim((np.nanmin(ds), np.nanmax(ds)))
- def update(self, ix=None, cutoff=None, update_canvas=True):
- if region is not None:
- self._new_region(region)
- if ix is not None and ix != self.current_ix:
- ds = self.da_sub[ix]
- self.current_ix = ix
- self.line.set_ydata(ds)
- self.ax.set_ylim((np.nanmin(ds), np.nanmax(ds)))
- if cutoff is None:
- if not self.is_symmetric:
- self.update(cutoff=(self.ax.get_ylim()[1]-self.ax.get_ylim()[0])/2 + self.ax.get_ylim()[0],
- update_canvas=False)
- else:
- self.update(cutoff=self.ax.get_ylim()[1] / 2, update_canvas=False)
- if update_canvas:
- self.fig.canvas.draw()
- if cutoff is not None and cutoff != self.current_cutoff:
- if self.is_symmetric:
- self.current_cutoff = abs(cutoff)
- else:
- self.current_cutoff = cutoff
- self.cutoff_line.set_ydata(self.current_cutoff)
- if self.is_symmetric:
- self.cutoff_line_mirror.set_ydata(-1*self.current_cutoff)
- if update_canvas:
- self.fig.canvas.draw()
- def define_levels(self, nb_class, disc_func):
- pot = self.pot
- _min = np.nanmin(pot)
- if not nb_class:
- nb_class = int(get_opt_nb_class(len(pot)) - 2)
- if not disc_func or "prog_geom" in disc_func:
- levels = [_min] + [
- np.nanmax(pot) / i for i in range(1, nb_class + 1)][::-1]
- elif "equal_interval" in disc_func:
- _bin = np.nanmax(pot) / nb_class
- levels = [_min] + [_bin * i for i in range(1, nb_class+1)]
- elif "percentiles" in disc_func:
- levels = np.percentile(
- np.concatenate((pot[pot.nonzero()], np.array([_min]))),
- np.linspace(0.0, 100.0, nb_class+1))
- elif "jenks" in disc_func:
- levels = list(jenks_breaks(np.concatenate(
- ([_min], pot[pot.nonzero()])), nb_class))
- levels[0] = levels[0] - _min * 0.01
- elif "head_tail" in disc_func:
- levels = head_tail_breaks(np.concatenate(
- ([_min], pot[pot.nonzero()])))
- elif "maximal_breaks" in disc_func:
- levels = maximal_breaks(np.concatenate(
- ([_min], nb_class)
- else:
- raise ValueError
- return levels
- def set_range(self, x_data, y_data):
- min_x, max_x = np.nanmin(x_data), np.nanmax(x_data)
- min_y, max_y = np.nanmin(y_data), np.nanmax(y_data)
- self.plotview.setRange(
- QRectF(min_x, min_y, max_x - min_x, max_y - min_y),
- padding=0.025)
- self.plotview.replot()
Jupyter 中的 Numpy 在打印时出错(Python 版本 3.8.8):TypeError: 'numpy.ndarray' object is not callable
如何解决 Jupyter 中的 Numpy 在打印时出错(Python 版本 3.8.8):TypeError: 'numpy.ndarray' object is not callable?
晚安, 尝试打印以下内容时,我在 jupyter 中遇到了 numpy 问题,并且得到了一个 错误: 需要注意的是python版本是3.8.8。 我先用 spyder 测试它,它运行正确,它给了我预期的结果
使用 Spyder:
import numpy as np
for i in range (5):
n = np.random.rand ()
print (n)
Results
0.6604903457995978
0.8236300859753154
0.16067650689842816
0.6967868357083673
0.4231597934445466
现在有了 jupyter
import numpy as np
for i in range (5):
n = np.random.rand ()
print (n)
-------------------------------------------------- ------
TypeError Traceback (most recent call last)
<ipython-input-78-0c6a801b3ea9> in <module>
2 for i in range (5):
3 n = np.random.rand ()
----> 4 print (n)
TypeError: ''numpy.ndarray'' object is not callable
感谢您对我如何在 Jupyter 中解决此问题的帮助。
非常感谢您抽出宝贵时间。
阿特,约翰”
解决方法
暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!
如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。
小编邮箱:dio#foxmail.com (将#修改为@)
numpy.random.random & numpy.ndarray.astype & numpy.arange
今天看到这样一句代码:
xb = np.random.random((nb, d)).astype(''float32'') #创建一个二维随机数矩阵(nb行d列)
xb[:, 0] += np.arange(nb) / 1000. #将矩阵第一列的每个数加上一个值
要理解这两句代码需要理解三个函数
1、生成随机数
numpy.random.random(size=None)
size为None时,返回float。
size不为None时,返回numpy.ndarray。例如numpy.random.random((1,2)),返回1行2列的numpy数组
2、对numpy数组中每一个元素进行类型转换
numpy.ndarray.astype(dtype)
返回numpy.ndarray。例如 numpy.array([1, 2, 2.5]).astype(int),返回numpy数组 [1, 2, 2]
3、获取等差数列
numpy.arange([start,]stop,[step,]dtype=None)
功能类似python中自带的range()和numpy中的numpy.linspace
返回numpy数组。例如numpy.arange(3),返回numpy数组[0, 1, 2]
numpy.ravel()/numpy.flatten()/numpy.squeeze()
numpy.ravel(a, order=''C'')
Return a flattened array
numpy.chararray.flatten(order=''C'')
Return a copy of the array collapsed into one dimension
numpy.squeeze(a, axis=None)
Remove single-dimensional entries from the shape of an array.
相同点: 将多维数组 降为 一维数组
不同点:
ravel() 返回的是视图(view),意味着改变元素的值会影响原始数组元素的值;
flatten() 返回的是拷贝,意味着改变元素的值不会影响原始数组;
squeeze()返回的是视图(view),仅仅是将shape中dimension为1的维度去掉;
ravel()示例:
1 import matplotlib.pyplot as plt
2 import numpy as np
3
4 def log_type(name,arr):
5 print("数组{}的大小:{}".format(name,arr.size))
6 print("数组{}的维度:{}".format(name,arr.shape))
7 print("数组{}的维度:{}".format(name,arr.ndim))
8 print("数组{}元素的数据类型:{}".format(name,arr.dtype))
9 #print("数组:{}".format(arr.data))
10
11 a = np.floor(10*np.random.random((3,4)))
12 print(a)
13 log_type(''a'',a)
14
15 a1 = a.ravel()
16 print("a1:{}".format(a1))
17 log_type(''a1'',a1)
18 a1[2] = 100
19
20 print(a)
21 log_type(''a'',a)
flatten()示例
1 import matplotlib.pyplot as plt
2 import numpy as np
3
4 def log_type(name,arr):
5 print("数组{}的大小:{}".format(name,arr.size))
6 print("数组{}的维度:{}".format(name,arr.shape))
7 print("数组{}的维度:{}".format(name,arr.ndim))
8 print("数组{}元素的数据类型:{}".format(name,arr.dtype))
9 #print("数组:{}".format(arr.data))
10
11 a = np.floor(10*np.random.random((3,4)))
12 print(a)
13 log_type(''a'',a)
14
15 a1 = a.flatten()
16 print("修改前a1:{}".format(a1))
17 log_type(''a1'',a1)
18 a1[2] = 100
19 print("修改后a1:{}".format(a1))
20
21 print("a:{}".format(a))
22 log_type(''a'',a)
squeeze()示例:
1. 没有single-dimensional entries的情况
1 import matplotlib.pyplot as plt
2 import numpy as np
3
4 def log_type(name,arr):
5 print("数组{}的大小:{}".format(name,arr.size))
6 print("数组{}的维度:{}".format(name,arr.shape))
7 print("数组{}的维度:{}".format(name,arr.ndim))
8 print("数组{}元素的数据类型:{}".format(name,arr.dtype))
9 #print("数组:{}".format(arr.data))
10
11 a = np.floor(10*np.random.random((3,4)))
12 print(a)
13 log_type(''a'',a)
14
15 a1 = a.squeeze()
16 print("修改前a1:{}".format(a1))
17 log_type(''a1'',a1)
18 a1[2] = 100
19 print("修改后a1:{}".format(a1))
20
21 print("a:{}".format(a))
22 log_type(''a'',a)
从结果中可以看到,当没有 single-dimensional entries 时,squeeze() 返回的数组对象是一个 view,而不是 copy。
2. 有single-dimentional entries 的情况
1 import matplotlib.pyplot as plt
2 import numpy as np
3
4 def log_type(name,arr):
5 print("数组{}的大小:{}".format(name,arr.size))
6 print("数组{}的维度:{}".format(name,arr.shape))
7 print("数组{}的维度:{}".format(name,arr.ndim))
8 print("数组{}元素的数据类型:{}".format(name,arr.dtype))
9 #print("数组:{}".format(arr.data))
10
11 a = np.floor(10*np.random.random((1,3,4)))
12 print(a)
13 log_type(''a'',a)
14
15 a1 = a.squeeze()
16 print("修改前a1:{}".format(a1))
17 log_type(''a1'',a1)
18 a1[2] = 100
19 print("修改后a1:{}".format(a1))
20
21 print("a:{}".format(a))
22 log_type(''a'',a)
Numpy:数组创建 numpy.arrray() , numpy.arange()、np.linspace ()、数组基本属性
一、Numpy数组创建
part 1:np.array()、np.arange()、np.zeros()、np.ones() 等基础的数组创建方式
import numpy as np
''''''
numpy中的ndarray数组
''''''
ary = np.array([1, 2, 3, 4, 5])
print(ary)
ary = ary * 10
print(ary)
''''''
ndarray对象的创建
''''''
# 创建二维数组
# np.array([[],[],...])
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(a)
# np.arange(起始值, 结束值, 步长(默认1))
b = np.arange(1, 10, 1)
print(b)
print("-------------np.zeros(数组元素个数, dtype=''数组元素类型'')-----")
# 创建一维数组:
c = np.zeros(10)
print(c, ''; c.dtype:'', c.dtype)
# 创建二维数组:
print(np.zeros ((3,4)))
print("----------np.ones(数组元素个数, dtype=''数组元素类型'')--------")
# 创建一维数组:
d = np.ones(10, dtype=''int64'')
print(d, ''; d.dtype:'', d.dtype)
# 创建三维数组:
print(np.ones( (2,3,4), dtype=np.int32 ))
# 打印维度
print(np.ones( (2,3,4), dtype=np.int32 ).ndim) # 返回:3(维)
结果图:
part 2 :np.linspace ( 起始值,终止值,元素总个数)
import numpy as np
a = np.arange( 10, 30, 5 )
b = np.arange( 0, 2, 0.3 )
c = np.arange(12).reshape(4,3)
d = np.random.random((2,3)) # 取0到1之间的随机数,设置为2行3列的结构
print(a)
print(b)
print(c)
print(d)
print("-----------------")
from numpy import pi
print(np.linspace( 0, 2*pi, 100 ))
print("-------------np.linspace(起始值,终止值,元素总个数)------------------")
print(np.sin(np.linspace( 0, 2*pi, 100 )))
结果图:
二、Numpy的ndarray对象属性:
数组的结构:array.shape
数组的维度:array.ndim
元素的类型:array.dtype
数组元素的个数:array.size
数组的索引(下标):array[0]
''''''
数组的基本属性
''''''
import numpy as np
print("--------------------案例1:------------------------------")
a = np.arange(15).reshape(3, 5)
print(a)
print(a.shape) # 打印数组结构
print(len(a)) # 打印有多少行
print(a.ndim) # 打印维度
print(a.dtype) # 打印a数组内的元素的数据类型
# print(a.dtype.name)
print(a.size) # 打印数组的总元素个数
print("-------------------案例2:---------------------------")
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
# 测试数组的基本属性
print(''a.shape:'', a.shape)
print(''a.size:'', a.size)
print(''len(a):'', len(a))
# a.shape = (6, ) # 此格式可将原数组结构变成1行6列的数据结构
# print(a, ''a.shape:'', a.shape)
# 数组元素的索引
ary = np.arange(1, 28)
ary.shape = (3, 3, 3) # 创建三维数组
print("ary.shape:",ary.shape,"\n",ary )
print("-----------------")
print(''ary[0]:'', ary[0])
print(''ary[0][0]:'', ary[0][0])
print(''ary[0][0][0]:'', ary[0][0][0])
print(''ary[0,0,0]:'', ary[0, 0, 0])
print("-----------------")
# 遍历三维数组:遍历出数组里的每个元素
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
for k in range(ary.shape[2]):
print(ary[i, j, k], end='' '')
结果图:
今天关于Python numpy 模块-nanmin() 实例源码和python中numpy模块的介绍到此结束,谢谢您的阅读,有关Jupyter 中的 Numpy 在打印时出错(Python 版本 3.8.8):TypeError: 'numpy.ndarray' object is not callable、numpy.random.random & numpy.ndarray.astype & numpy.arange、numpy.ravel()/numpy.flatten()/numpy.squeeze()、Numpy:数组创建 numpy.arrray() , numpy.arange()、np.linspace ()、数组基本属性等更多相关知识的信息可以在本站进行查询。
本文标签: