This article discusses example source code for the Python numpy module function in1d(), together with general notes on the numpy module in Python. It also covers: Numpy in Jupyter errors when printing (Python version 3.8.8): TypeError: 'numpy.ndarray' object is not callable; Numpy .in1d method does not correctly evaluate an array against an array view?; numpy.random.random & numpy.ndarray.astype & numpy.arange; and numpy.ravel()/numpy.flatten()/numpy.squeeze(). We hope it is useful.
Contents of this article:
- Python numpy module in1d() example source code (the numpy module in Python)
- Numpy in Jupyter errors when printing (Python version 3.8.8): TypeError: 'numpy.ndarray' object is not callable
- Numpy .in1d method does not correctly evaluate an array against an array view?
- numpy.random.random & numpy.ndarray.astype & numpy.arange
- numpy.ravel()/numpy.flatten()/numpy.squeeze()
Python numpy module in1d() example source code (the numpy module in Python)
Python numpy module: in1d() example source code
The following 50 code examples, extracted from open-source Python projects, illustrate how numpy.in1d() is used.
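Before the project snippets, here is a minimal, self-contained sketch of what numpy.in1d() computes (the array values are arbitrary):

import numpy as np

haystack = np.array([10, 20, 30, 40])
needles = np.array([30, 10, 99])

# For each element of `needles`, report whether it occurs in `haystack`.
mask = np.in1d(needles, haystack)
print(mask)           # [ True  True False]
print(needles[mask])  # [30 10]

# np.in1d flattens its inputs; newer NumPy versions also provide np.isin,
# which preserves the shape of its first argument.
print(np.isin(needles, haystack))  # [ True  True False]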
- def take_slice_of_analogsignalarray_by_channelindex(self,
-                                                      channel_indexes=None):
-     '''
-     Return slices of the :class:`AnalogSignalArrays` in the
-     :class:`Segment` that correspond to the :attr:`channel_indexes`
-     provided.
-     '''
-     if channel_indexes is None:
-         return []
-     sliced_sigarrays = []
-     for sigarr in self.analogsignals:
-         if sigarr.get_channel_index() is not None:
-             ind = np.in1d(sigarr.get_channel_index(), channel_indexes)
-             sliced_sigarrays.append(sigarr[:, ind])
-     return sliced_sigarrays
- def get_1000G_snps(sumstats, out_file):
-     sf = np.loadtxt(sumstats, dtype=str, skiprows=1)
-     h5f = h5py.File('ref/Misc/1000G_SNP_info.h5', 'r')
-     rf = h5f['snp_chr'][:]
-     h5f.close()
-     ind1 = np.in1d(sf[:,1], rf[:,2])
-     ind2 = np.in1d(rf[:,2], sf[:,1])
-     sf1 = sf[ind1]
-     rf1 = rf[ind2]
-     ### check order ###
-     if sum(sf1[:,1]==rf1[:,2])==len(rf1[:,2]):
-         print('Good!')
-     else:
-         print('Shit happens, sorting sf1 to have the same order as rf1')
-         O1 = np.argsort(sf1[:,1])
-         O2 = np.argsort(rf1[:,2])
-         O3 = np.argsort(O2)
-         sf1 = sf1[O1][O3]
-     out = ['hg19chrc snpid a1 a2 bp or p'+'\n']
-     for i in range(len(sf1[:,1])):
-         out.append(sf1[:,0][i]+' '+sf1[:,1][i]+' '+sf1[:,2][i]+' '+sf1[:,3][i]+' '+rf1[:,5][i]+' '+sf1[:,6][i]+'\n')
-     ff = open(out_file, "w")
-     ff.writelines(out)
-     ff.close()
- def test_multicollinearity(df, target_name, r2_threshold = 0.89):
- ''''''Tests if any of the features Could be predicted from others with R2 >= 0.89
- input: dataframe,name of target (to exclude)
- ''''''
- r2s = pd.DataFrame()
- for feature in df.columns.difference([target_name]):
- model = sk.linear_model.Ridge()
- model.fit(df[df.columns.difference([target_name,feature])], df[feature])
- pos = np.in1d(model.coef_, np.sort(model.coef_)[-5:])
- r2s = r2s.append(pd.DataFrame({''r2'':sk.metrics.r2_score(df[feature],\\
- model.predict(df[df.columns.difference([target_name, feature])])),\\
- ''predictors'' : str(df.columns.difference([target_name, feature])[np.ravel(np.argwhere(pos == True))].tolist())}, index = [feature]))
- print(''Testing'', feature)
- print(''-----------------'')
- if len(r2s[r2s[''r2''] >= r2_threshold]) > 0:
- print(''multicollinearity detected'')
- print(r2s[r2s[''r2''] >= r2_threshold])
- else:
- print(''No multicollinearity'')
- def __init__(self, **kwargs):
- logging.info(''Crossword __init__: Initializing crossword...'')
- logging.debug(''kwargs:'', kwargs)
- # Reading kwargs
- self.setup = kwargs
- self.rows = int(kwargs.get(''n'', 5))
- self.cols = int(kwargs.get(''m'', 5))
- self.words_file = str(kwargs.get(''word_file'', ''lemma.num.txt''))
- self.sort = bool(kwargs.get(''sort'', False))
- self.maximize_len = bool(kwargs.get(''maximize_len'', False))
- self.repeat_words = bool(kwargs.get(''repeat_words'', False))
- logging.debug(''Crossword __init__: n={},m={},fname={}''.format(self.rows, self.cols, self.words_file))
- # Loading words
- logging.debug(''Crossword __init__: Started loading words from {}''.format(self.words_file))
- arr = np.genfromtxt(self.words_file, dtype=''str'', delimiter='' '')
- self.words = arr[np.in1d(arr[:, 3], [''v'', ''n'', ''adv'', ''a''])][:, 2].tolist()
- # Number of words loaded
- logging.debug(''Crossword __init__: Number of words loaded: {}''.format(len(self.words)))
- self.words = list(set(x for x in self.words if len(x) <= self.rows and len(x) <= self.cols))
- if self.sort:
- self.words = sorted(self.words, key=len, reverse=self.maximize_len)
- # After filter logging
- logging.debug(''Crossword __init__: Number of words after filter: {},maxlen = {}''.format(len(self.words), len(
- max(self.words, key=len))))
- def test_df_col_or_idx_equivalence(df1,
- df2,
- col=None):
- ''''''check whether two dataframes contain the same elements (but not
- necessarily in the same order) in either the indexes or a selected column
- inputs
- df1,df2
- the dataframes to check
- col
- if not None,test this dataframe column for equivalency,otherwise
- test the dataframe indexes
- Returns True or False
- ''''''
- if not col:
- result = all(np.in1d(df1.index, df2.index,
- assume_unique=True,
- invert=False))
- else:
- result = all(np.in1d(df1[col], df2[col],
- assume_unique=False,
- invert=False))
- return result
- def make_classifier(self, name, ids, labels):
-     """Train an SVM classifier on the loaded texts.
-     Creates a classifier that is stored on the object under the name `name`.
-     Args:
-         name (str): Name for the classifier.
-         ids (list): A list of N ids of texts already stored in the
-             TextClassifier.
-         labels (list): A list of N labels, one for each text id in ids.
-     Note:
-         Uses the `Scikit-learn <http://scikit-learn.org/>`_ classifier.
-     """
-     if not all(np.in1d(ids, self.ids)):
-         raise ValueError("Some text ids are not stored.")
-     setattr(self, name, SGDClassifier())
-     classifier = getattr(self, name)
-     indices = np.searchsorted(self.ids, ids)
-     classifier.fit(self.tfidf_mat[indices, :], labels)
- def retrain(self, name, ids, labels):
-     """Partially retrain an SVM classifier.
-     Args:
-         name (str): Name of the classifier.
-         ids (list): A list of N ids of texts already stored in the
-             TextClassifier.
-         labels (list): A list of N labels, one for each text id in ids.
-     Note:
-         Uses the `Scikit-learn <http://scikit-learn.org/>`_ classifier.
-     """
-     if not all(np.in1d(ids, self.ids)):
-         raise ValueError("Some text ids are not stored.")
-     try:
-         classifier = getattr(self, name)
-     except AttributeError:
-         raise AttributeError("There is no classifier with that name.")
-     indices = np.in1d(self.ids, ids)
-     if isinstance(labels, str):
-         labels = [labels]
-     classifier.partial_fit(self.tfidf_mat[indices, :], labels)
- def get_Feedback_data(self, on_level=None):
- Feedback = self.data.fields.Feedback
- eval_data = self.data.test.evalset[Feedback].values
- holdout = self.data.holdout_size
- Feedback_data = eval_data.reshape(-1, holdout)
- if on_level is not None:
- try:
- iter(on_level)
- except TypeError:
- Feedback_data = np.ma.masked_not_equal(Feedback_data, on_level)
- else:
- mask_level = np.in1d(Feedback_data.ravel(),
- on_level,
- invert=True).reshape(Feedback_data.shape)
- Feedback_data = np.ma.masked_where(mask_level, Feedback_data)
- return Feedback_data
- def _find_optimal_clustering(self,clusterings):
- max_score = float(''-inf'')
- max_clustering = None
- for clustering in clusterings:
- labeled_vectors = [(node.vector,cluster_idx) for cluster_idx in range(len(clustering)) for node in _get_cluster_nodes(clustering[cluster_idx][1]) ]
- vectors,labels = [np.array(x) for x in zip(*labeled_vectors)]
- if np.in1d([1],labels)[0]:
- score = silhouette_score(vectors,labels,metric=''cosine'')
- else:
- continue # silhouette doesn''t work with just one cluster
- if score > max_score:
- max_score = score
- max_clustering = clustering
- return zip(*max_clustering)[1] if max_clustering else zip(*clusterings[0])[1]
- def remove_rare_elements(data, min_user_activity, min_item_popularity):
- ''''''Removes user and items that appears in too few interactions.
- min_user_activity is the minimum number of interaction that a user should have.
- min_item_popularity is the minimum number of interaction that an item should have.
- NB: the constraint on item might not be strictly satisfied because rare users and items are removed in alternance,
- and the last removal of inactive users might create new rare items.
- ''''''
- print(''Remove inactive users and rare items...'')
- #Remove inactive users a first time
- user_activity = data.groupby(''u'').size()
- data = data[np.in1d(data.u, user_activity[user_activity >= min_user_activity].index)]
- #Remove unpopular items
- item_popularity = data.groupby(''i'').size()
- data = data[np.in1d(data.i, item_popularity[item_popularity >= min_item_popularity].index)]
- #Remove users that might have passed below the activity threshold due to the removal of rare items
- user_activity = data.groupby(''u'').size()
- data = data[np.in1d(data.u, user_activity[user_activity >= min_user_activity].index)]
- return data
- def reconstruct_goal(world):
- # pdb.set_trace()
- world = world.copy()
- ## indices for grass and puddle
- background_inds = [obj[''index''] for (name, obj) in library.objects.iteritems() if obj[''background'']]
- ## background mask
- background = np.in1d(world, background_inds)
- background = background.reshape( (world.shape) )
- ## set backgronud to 0
- world[background] = 0
- ## subtract largest background ind
- ## so indices of objects begin at 1
- world[~background] -= max(background_inds)
- world = np.expand_dims(np.expand_dims(world, 0), 0)
- # pdb.set_trace()
- return world
- def check_multiplication_dims(dims, N, M, vidx=False, without=False):
- dims = array(dims, ndmin=1)
- if len(dims) == 0:
- dims = arange(N)
- if without:
- dims = setdiff1d(range(N), dims)
- if not np.in1d(dims, arange(N)).all():
- raise ValueError(''Invalid dimensions'')
- P = len(dims)
- sidx = np.argsort(dims)
- sdims = dims[sidx]
- if vidx:
- if M > N:
- raise ValueError(''More multiplicants than dimensions'')
- if M != N and M != P:
- raise ValueError(''Invalid number of multiplicants'')
- if P == M:
- vidx = sidx
- else:
- vidx = sdims
- return sdims, vidx
- else:
- return sdims
- def particle_mask(self):
- # Dynamically create the masking array for particles,and get
- # the data using standard yt methods.
- if self._particle_mask is not None:
- return self._particle_mask
- # This is from disk.
- pid = self.__getitem__(''particle_index'')
- # This is from the sphere.
- if self._name == "RockstarHalo":
- ds = self.ds.sphere(self.CoM, self._radjust * self.max_radius)
- elif self._name == "LoadedHalo":
- ds = self.ds.sphere(self.CoM, np.maximum(self._radjust * \\
- self.ds.quan(self.max_radius, ''code_length''), \\
- self.ds.index.get_smallest_dx()))
- sp_pid = ds[''particle_index'']
- self._ds_sort = sp_pid.argsort()
- sp_pid = sp_pid[self._ds_sort]
- # This matches them up.
- self._particle_mask = np.in1d(sp_pid, pid)
- return self._particle_mask
- def has_approx_support(m, m_hat, prob=0.01):
- """Returns 1 if model selection error is less than or equal to prob rate,
- 0 else.
- NOTE: why does np.nonzero/np.flatnonzero create so much problems?
- """
- m_nz = np.flatnonzero(np.triu(m, 1))
- m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))
- upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
- not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)
- intersection = np.in1d(m_hat_nz, m_nz) # true positives
- not_intersection = np.in1d(m_hat_nz, not_m_nz) # false positives
- true_positive_rate = 0.0
- if len(m_nz):
- true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
- true_negative_rate = 1. - true_positive_rate
- false_positive_rate = 0.0
- if len(not_m_nz):
- false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)
- return int(np.less_equal(true_negative_rate + false_positive_rate, prob))
- def get_membership_mask(self, labels, rows_or_columns):
- from .util import array_tostring
- assert rows_or_columns in [''rows'', ''columns'']
- assert isinstance(labels, np.ndarray)
- assert labels.size > 0
- if rows_or_columns == "rows":
- filter_labels = self.rowlabels
- else:
- filter_labels = self.columnlabels
- labels = array_tostring(labels)
- filter_labels = array_tostring(filter_labels)
- return np.in1d(filter_labels.ravel(),
- labels).reshape(filter_labels.shape)
- def discrete(self, x, bin=5):
- #res = np.array([0] * x.shape[-1],dtype=int)
- x_copy = pd.Series.copy(x)
- x_copy = x_copy.astype(str)
- #x_copy = x_copy.astype(np.str_)
- #x_copy = x
- x_gt0 = x[x>=0]
- #if x.name == ''TD_pltF_CNT_1M'':
- #bin = 5
- #x_gt0 = x[(x>=0) & (x<=24)]
- for i in range(bin):
- point1 = stats.scoreatpercentile(x_gt0, i * (100.0/bin))
- point2 = stats.scoreatpercentile(x_gt0, (i + 1) * (100.0/bin))
- x1 = x[(x >= point1) & (x <= point2)]
- mask = np.in1d(x, x1)
- #x_copy[mask] = i + 1
- x_copy[mask] = '%s-%s' % (point1, point2)
- #x_copy[mask] = point1
- #print x_copy[mask]
- #print x
- #print x
- return x_copy
- def grade(self, x, bin=5):
-     #res = np.array([0] * x.shape[-1], dtype=int)
-     x_copy = np.copy(x)
-     #x_copy = x_copy.astype(str)
-     #x_copy = x_copy.astype(np.str_)
-     #x_copy = x
-     x_gt0 = x[x>=0]
-     for i in range(bin):
-         point1 = stats.scoreatpercentile(x_gt0, i * (100.0/bin))
-         point2 = stats.scoreatpercentile(x_gt0, (i + 1) * (100.0/bin))
-         x1 = x[(x >= point1) & (x <= point2)]
-         mask = np.in1d(x, x1)
-         x_copy[mask] = i + 1
-         #x_copy[mask] = point1
-         #print x_copy[mask]
-         print(point1, point2)
-     return x_copy
- def map_values(values, pos, target_pos, dtype=None, nan=dat.CPG_NAN):
- """Maps `values` array at positions `pos` to `target_pos`.
- Inserts `nan` for uncovered positions.
- """
- assert len(values) == len(pos)
- assert np.all(pos == np.sort(pos))
- assert np.all(target_pos == np.sort(target_pos))
- values = values.ravel()
- pos = pos.ravel()
- target_pos = target_pos.ravel()
- idx = np.in1d(pos, target_pos)
- pos = pos[idx]
- values = values[idx]
- if not dtype:
- dtype = values.dtype
- target_values = np.empty(len(target_pos), dtype=dtype)
- target_values.fill(nan)
- idx = np.in1d(target_pos, pos).nonzero()[0]
- assert len(idx) == len(values)
- assert np.all(target_pos[idx] == pos)
- target_values[idx] = values
- return target_values
- def test_learn_codes():
- """Test learning of codes."""
- thresh = 0.25
- X, ds, z = simulate_data(n_trials, n_times, n_times_atom, n_atoms)
- for solver in (''l_bfgs'', ''ista'', ''fista''):
- z_hat = update_z(X, reg, solver=solver,
- solver_kwargs=dict(factr=1e11, max_iter=50))
- X_hat = construct_X(z_hat, ds)
- assert_true(np.corrcoef(X.ravel(), X_hat.ravel())[1, 1] > 0.99)
- assert_true(np.max(X - X_hat) < 0.1)
- # Find position of non-zero entries
- idx = np.ravel_multi_index(z[0].nonzero(), z[0].shape)
- loc_x, loc_y = np.where(z_hat[0] > thresh)
- # shift position by half the length of atom
- idx_hat = np.ravel_multi_index((loc_x, loc_y), z_hat[0].shape)
- # make sure that the positions are a subset of the positions
- # in the original z
- mask = np.in1d(idx_hat, idx)
- assert_equal(np.sum(mask), len(mask))
- def __init__(self, topology, selstr=None, deg=False, cossin=False, periodic=True):
- indices = indices_phi(topology)
- if not selstr:
- self._phi_inds = indices
- else:
- self._phi_inds = indices[np.in1d(indices[:, 1],
- topology.select(selstr), assume_unique=True)]
- indices = indices_psi(topology)
- if not selstr:
- self._psi_inds = indices
- else:
- self._psi_inds = indices[np.in1d(indices[:, 1],
- topology.select(selstr), assume_unique=True)]
- # alternate phi, psi pairs (phi_1, psi_1, ..., phi_n, psi_n)
- dih_indexes = np.array(list(phi_psi for phi_psi in
- zip(self._phi_inds, self._psi_inds))).reshape(-1, 4)
- super(BackbonetorsionFeature, self).__init__(topology, dih_indexes,
- deg=deg, cossin=cossin,
- periodic=periodic)
- def test_ttv_array_like_data_source(self):
- dummy_data_source = DummyDataSource()
- subject_info_dir = os.path.join(''test'', ''dummy_data'', ''Metadata'')
- ttv = yaml_to_dict(os.path.join(subject_info_dir, ''dummy_ttv.yaml''))
- array_ds = TtvarrayLikeDataSource(dummy_data_source, ttv)
- self.assertEqual(len(array_ds), 3)
- all_values = np.fromiter((x for x in array_ds[:]), dtype=''int16'')
- self.assertTrue(
- np.all(
- np.in1d(
- all_values,
- np.array([1, 2, 3])
- )
- )
- )
- def get_data(self, dataset, event_list=None):
- # Load Basics for this dataset and shift it by 1
- data = hax.minitrees.load_single_minitree(dataset, ''Basics'')
- df = data.shift(1)
- # Add prevIoUs_ prefix to all columns
- df = df.rename(columns=lambda x: ''prevIoUs_'' + x)
- # Add (unshifted) event number and run number,to support merging
- df[''event_number''] = data[''event_number'']
- df[''run_number''] = data[''run_number'']
- # Support for event list (lame)
- if event_list is not None:
- df = df[np.in1d(df[''event_number''].values, event_list)]
- return df
- def extract_from_volume(vol_data, vox_ijk):
- """Extract data values (broadcasting across time if relevant)."""
- i, j, k = vox_ijk.T
- ii, jj, kk = vol_data.shape[:3]
- fov = (np.in1d(i, np.arange(ii)) &
- np.in1d(j, np.arange(jj)) &
- np.in1d(k, np.arange(kk)))
- if len(vol_data.shape) == 3:
- ntp = 1
- else:
- ntp = vol_data.shape[-1]
- roi_data = np.empty((len(i), ntp))
- roi_data[:] = np.nan
- roi_data[fov] = vol_data[i[fov], j[fov], k[fov]]
- return roi_data
- def clip_catalog(self):
- # ROI-specific catalog
- logger.debug("Clipping full catalog...")
- cut_observable = self.mask.restrictCatalogToObservableSpace(self.catalog_full)
- # All objects within disk ROI
- logger.debug("Creating roi catalog...")
- self.catalog_roi = self.catalog_full.applyCut(cut_observable)
- self.catalog_roi.project(self.roi.projector)
- self.catalog_roi.spatialBin(self.roi)
- # All objects interior to the background annulus
- logger.debug("Creating interior catalog...")
- cut_interior = numpy.in1d(ang2pix(self.config[''coords''][''nside_pixel''], self.catalog_roi.lon, self.catalog_roi.lat),
- self.roi.pixels_interior)
- #cut_interior = self.roi.inInterior(self.catalog_roi.lon,self.catalog_roi.lat)
- self.catalog_interior = self.catalog_roi.applyCut(cut_interior)
- self.catalog_interior.project(self.roi.projector)
- self.catalog_interior.spatialBin(self.roi)
- # Set the default catalog
- #logger.info("Using interior ROI for likelihood calculation")
- self.catalog = self.catalog_interior
- #self.pixel_roi_cut = self.roi.pixel_interior_cut
- def inFootprint(self, pixels, nside=None):
- """
- Open each valid filename for the set of pixels and determine the set
- of subpixels with valid data.
- """
- if numpy.isscalar(pixels): pixels = numpy.array([pixels])
- if nside is None: nside = self.nside_likelihood
- inside = numpy.zeros( len(pixels), dtype=''bool'')
- if not self.nside_catalog:
- catalog_pix = [0]
- else:
- catalog_pix = superpixel(pixels,nside,self.nside_catalog)
- catalog_pix = numpy.intersect1d(catalog_pix,self.catalog_pixels)
- for filenames in self.filenames[catalog_pix]:
- #logger.debug("Loading %s"%filenames[''mask_1''])
- subpix_1,val_1 = ugali.utils.skymap.readSparseHealpixmap(filenames[''mask_1''],''MAGLIM'',construct_map=False)
- #logger.debug("Loading %s"%filenames[''mask_2''])
- subpix_2,val_2 = ugali.utils.skymap.readSparseHealpixmap(filenames[''mask_2''],construct_map=False)
- subpix = numpy.intersect1d(subpix_1,subpix_2)
- superpix = numpy.unique(ugali.utils.skymap.superpixel(subpix,self.nside_pixel,nside))
- inside |= numpy.in1d(pixels, superpix)
- return inside
- def index_pixels(lon,lat,pixels,nside):
- """
- Find the index for object amoung a subset of healpix pixels.
- Set index of objects outside the pixel subset to -1
- # ADW: Not really safe to set index = -1 (accesses last entry);
- # -np.inf would be better,but breaks other code...
- """
- pix = ang2pix(nside,lon,lat)
- # pixels should be pre-sorted,otherwise...???
- index = np.searchsorted(pixels,pix)
- if np.isscalar(index):
- if not np.in1d(pix,pixels).any(): index = -1
- else:
- # Find objects that are outside the roi
- #index[np.take(pixels,index,mode=''clip'')!=pix] = -1
- index[~np.in1d(pix,pixels)] = -1
- return index
- ############################################################
- def get(self, names=None, burn=None, clip=None):
- if names is None: names = list(self.dtype.names)
- names = np.array(names,ndmin=1)
- missing = names[~np.in1d(names,self.dtype.names)]
- if len(missing):
- msg = "field(s) named %s not found"%(missing)
- raise ValueError(msg)
- #idx = np.where(np.in1d(self.dtype.names,names))[0]
- idx = np.array([self.dtype.names.index(n) for n in names])
- # Remove zero entries
- zsel = ~np.all(self.ndarray==0,axis=1)
- # Remove burn entries
- bsel = np.zeros(len(self),dtype=bool)
- bsel[slice(burn,None)] = 1
- data = self.ndarray[:,idx][bsel&zsel]
- if clip is not None:
- from astropy.stats import sigma_clip
- mask = sigma_clip(data,sig=clip,copy=False,axis=0).mask
- data = data[np.where(~mask.any(axis=1))]
- return data
- def _setup_subpix(self,nside=2**16):
- """
- Subpixels for random position generation.
- """
- # Only setup once...
- if hasattr(self,''subpix''): return
- # Simulate over full ROI
- self.roi_radius = self.config[''coords''][''roi_radius'']
- # Setup background spatial stuff
- logger.info("Setup subpixels...")
- self.nside_pixel = self.config[''coords''][''nside_pixel'']
- self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter
- epsilon = np.degrees(healpy.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix
- subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
- superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
- self.subpix = subpix[np.in1d(superpix,self.roi.pixels)]
- def iterate_minibatches(self, batchsize, shuffle=True, train=True):
- indices = []
- if train:
- indices = np.argwhere(np.in1d(data.labels, data.train_classes))
- else:
- indices = np.argwhere(np.logical_not(np.in1d(data.labels, data.train_classes)))
- if shuffle:
- np.random.shuffle(indices)
- for start_idx in range(0, len(self.img_paths) - batchsize + 1, batchsize):
- excerpt = indices[start_idx:start_idx + batchsize]
- images = [self._load_preprocess_img(self.img_paths[int(i)]) for i in excerpt]
- if len(images) == batchsize:
- yield np.concatenate(images), np.array(self.labels[excerpt]).astype(np.int32).T
- else:
- raise StopIteration
- def GetEdgeMask(self, angle):
- """
- Returns a mask of the points of a surface mesh that have a surface
- angle greater than angle
- Parameters
- ----------
- angle : float
- Angle to consider an edge.
- """
- featureEdges = vtk.vtkFeatureEdges()
- featureEdges.SetInputData(self)
- featureEdges.FeatureEdgesOn()
- featureEdges.BoundaryEdgesOff()
- featureEdges.NonManifoldEdgesOff()
- featureEdges.ManifoldEdgesOff()
- featureEdges.SetFeatureAngle(angle)
- featureEdges.Update()
- edges = featureEdges.GetOutput()
- origID = vtkInterface.GetPointScalars(edges, ''vtkOriginalPointIds'')
- return np.in1d(self.GetPointScalars(''vtkOriginalPointIds''),
- origID,
- assume_unique=True)
- def RaDec2region(ra, dec, nside):
- SCP_indx, nes_indx, GP_indx, WFD_indx = mutually_exclusive_regions(nside)
- indices = _raDec2Hpid(nside, np.radians(ra), np.radians(dec))
- result = np.empty(np.size(indices), dtype = object)
- SCP = np.in1d(indices, SCP_indx)
- nes = np.in1d(indices,nes_indx)
- GP = np.in1d(indices,GP_indx)
- WFD = np.in1d(indices,WFD_indx)
- result[SCP] = ''SCP''
- result[nes] = ''nes''
- result[GP] = ''GP''
- result[WFD] = ''WFD''
- return result
- def __getitem__(self, thing: Any) -> sparse.coo_matrix:
- if type(thing) is slice or type(thing) is np.ndarray or type(thing) is int:
- gm = GraphManager(None, axis=self.axis)
- for key, g in self.items():
- # Slice the graph matrix properly without making it dense
- (a, b, w) = (g.row, g.col, g.data)
- indices = np.arange(g.shape[0])[thing]
- mask = np.logical_and(np.in1d(a, indices), np.in1d(b, indices))
- a = a[mask]
- b = b[mask]
- w = w[mask]
- d = dict(zip(np.sort(indices), np.arange(indices.shape[0])))
- a = np.array([d[x] for x in a])
- b = np.array([d[x] for x in b])
- gm[key] = sparse.coo_matrix((w, (a, b)), shape=(len(indices), len(indices)))
- return gm
- else:
- return self.__getattr__(thing)
- def get_data_by_id(self, ids):
- """ Helper for getting current data values from stored identifiers
- :param float|list ids: ids for which data are requested
- :return: the stored ids
- :rtype: np.ndarray
- """
- if self.ids is None:
- raise ValueError("IDs not stored in node {}".format(self.name))
- if self.data is None:
- raise ValueError("No data in node {}".format(self.name))
- ids = np.array(ids, ndmin=1, copy=False)
- found_items = np.in1d(ids, self.ids)
- if not np.all(found_items):
- raise ValueError("Cannot find {} among {}".format(ids[np.logical_not(found_items)],
- self.name))
- idx = np.empty(len(ids), dtype=''int'')
- for k, this_id in enumerate(ids):
- if self.ids.ndim > 1:
- idx[k] = np.flatnonzero(np.all(self.ids == this_id, axis=1))[0]
- else:
- idx[k] = np.flatnonzero(self.ids == this_id)[0]
- return np.array(self.data, ndmin=1)[idx]
- def split_data(data, num_folds, seed=0):
- """ Split all interactions into K-fold sets of training and test dataframes. Splitting is done
- by assigning student ids to the training or test sets.
- :param pd.DataFrame data: all interactions
- :param int num_folds: number of folds
- :param int seed: seed for the splitting
- :return: a generator over (train dataframe,test dataframe) tuples
- :rtype: generator[(pd.DataFrame,pd.DataFrame)]
- """
- # break up students into folds
- fold_student_idx = _get_fold_student_idx(np.unique(data[USER_IDX_KEY]), num_folds=num_folds,
- seed=seed)
- for fold_test_student_idx in fold_student_idx:
- test_idx = np.in1d(data[USER_IDX_KEY], fold_test_student_idx)
- train_idx = np.logical_not(test_idx)
- yield (data[train_idx].copy(), data[test_idx].copy())
- def eval_loop(data_loader, model, base_classes, novel_classes):
- model = model.eval()
- top1 = None
- top5 = None
- all_labels = None
- for i, (x,y) in enumerate(data_loader):
- x = Variable(x.cuda())
- scores = model(x)
- top1_this, top5_this = perelement_accuracy(scores.data, y)
- top1 = top1_this if top1 is None else np.concatenate((top1, top1_this))
- top5 = top5_this if top5 is None else np.concatenate((top5, top5_this))
- all_labels = y.numpy() if all_labels is None else np.concatenate((all_labels, y.numpy()))
- is_novel = np.in1d(all_labels, novel_classes)
- is_base = np.in1d(all_labels, base_classes)
- is_either = is_novel | is_base
- top1_novel = np.mean(top1[is_novel])
- top1_base = np.mean(top1[is_base])
- top1_all = np.mean(top1[is_either])
- top5_novel = np.mean(top5[is_novel])
- top5_base = np.mean(top5[is_base])
- top5_all = np.mean(top5[is_either])
- return np.array([top1_novel, top5_novel, top1_base, top5_base, top1_all, top5_all])
- def _mask_edges_weights(mask, edges, weights=None):
- """Apply a mask to edges (weighted or not)"""
- inds = np.arange(mask.size)
- inds = inds[mask.ravel()]
- ind_mask = np.logical_and(np.in1d(edges[0], inds),
- np.in1d(edges[1], inds))
- edges = edges[:, ind_mask]
- if weights is not None:
- weights = weights[ind_mask]
- if len(edges.ravel()):
- maxval = edges.max()
- else:
- maxval = 0
- order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
- edges = order[edges]
- if weights is None:
- return edges
- else:
- return edges, weights
- def map_2D_hist_to_ima(imaSlc2volHistMap, volHistMask):
- """Volume histogram to image mapping for slices (uses np.ind1).
- Parameters
- ----------
- imaSlc2volHistMap : Todo
- volHistMask : Todo
- Returns
- -------
- imaSlcMask : Todo
- """
- imaSlcMask = np.zeros(imaSlc2volHistMap.flatten().shape)
- idxUnique = np.unique(volHistMask)
- for idx in idxUnique:
- linIndices = np.where(volHistMask.flatten() == idx)[0]
- # return logical array with length equal to nr of voxels
- voxMask = np.in1d(imaSlc2volHistMap.flatten(), linIndices)
- # reset mask and apply logical indexing
- imaSlcMask[voxMask] = idx
- imaSlcMask = imaSlcMask.reshape(imaSlc2volHistMap.shape)
- return imaSlcMask
- def detect_input(cls, values, sample_size=200):
- """
- Return first "from_" method that in more than 50% matches values,
- or None.
- """
- assert isinstance(values, pd.Series)
- values = values.drop_duplicates().dropna()
- if len(values) > sample_size:
- values = values.sample(sample_size)
- strlen = values.str.len().dropna().unique()
- for method, *cond in ((cls.from_cc2, len(strlen) == 1 and strlen[0] == 2),
- (cls.from_cc3, len(strlen) == 1 and strlen[0] == 3),
- (cls.from_cc_name,),
- (cls.from_us_state,
- (cls.from_city_eu,
- (cls.from_city_us,
- (cls.from_city_world,
- (cls.from_region,
- (cls.from_fips,
- (cls.from_hasc, np.in1d(strlen, [2, 5, 8]).all())):
- if cond and not cond[0]:
- continue
- if sum(map(bool, method(values))) >= len(values) / 2:
- return method
- return None
- def init_snapshots(self):
- """Initialize snapshots for model variables given in attributes of
- Dataset.
- """
- self.snapshot_vars = self.dataset.xsimlab.snapshot_vars
- self.snapshot_values = {}
- for vars in self.snapshot_vars.values():
- self.snapshot_values.update({v: [] for v in vars})
- self.snapshot_save = {
- clock: np.in1d(self.dataset[self.master_clock_dim].values,
- self.dataset[clock].values)
- for clock in self.snapshot_vars if clock is not None
- }
- def crossGenotypeWindows(commonSNPsCHR, commonSNPsPOS, snpsP1, snpsP2, inFile, binLen, outFile, logDebug = True):
- ## inFile are the SNPs of the sample
- (snpCHR, snpPOS, snpGT, snpWEI, DPmean) = snpmatch.parseInput(inFile = inFile, logDebug = logDebug)
- # identifying the segregating SNPs between the accessions
- # only selecting 0 or 1
- segSNPsind = np.where((snpsP1 != snpsP2) & (snpsP1 >= 0) & (snpsP2 >= 0) & (snpsP1 < 2) & (snpsP2 < 2))[0]
- log.info("number of segregating snps between parents: %s", len(segSNPsind))
- (ChrBins, PosBins) = getBinsSNPs(commonSNPsCHR, binLen)
- log.info("number of bins: %s", len(ChrBins))
- outfile = open(outFile, ''w'')
- for i in range(len(PosBins)):
- start = np.sum(PosBins[0:i])
- end = start + PosBins[i]
- # first snp positions which are segregating and are in this window
- reqPOSind = segSNPsind[np.where((segSNPsind < end) & (segSNPsind >= start))[0]]
- reqPOS = commonSNPsPOS[reqPOSind]
- perchrTarPosind = np.where(snpCHR == ChrBins[i])[0]
- perchrTarPos = snpPOS[perchrTarPosind]
- matchedAccInd = reqPOSind[np.where(np.in1d(reqPOS, perchrTarPos))[0]]
- matchedTarInd = perchrTarPosind[np.where(np.in1d(perchrTarPos, reqPOS))[0]]
- matchedTarGTs = snpGT[matchedTarInd]
- try:
- TarGTBinary = snpmatch.parseGT(matchedTarGTs)
- TarGTBinary[np.where(TarGTBinary == 2)[0]] = 4
- genP1 = np.subtract(TarGTBinary, snpsP1[matchedAccInd])
- genP1no = len(np.where(genP1 == 0)[0])
- (geno, pval) = getwindowGenotype(genP1no, len(genP1))
- outfile.write("%s\\t%s\\t%s\\t%s\\t%s\\n" % (i+1, genP1no, len(genP1), geno, pval))
- except:
- outfile.write("%s\\tNA\\tNA\\tNA\\tNA\\n" % (i+1))
- if i % 40 == 0:
- log.info("progress: %s windows", i+10)
- log.info("done!")
- outfile.close()
- def intersect_and_sort_samples(sample_Metadata, feature_table):
- ''''''Return input tables retaining only shared samples,row order equivalent.
- Parameters
- ----------
- sample_Metadata : pd.DataFrame
- Contingency table with rows,columns = samples,Metadata.
- feature_table : pd.DataFrame
- Contingency table with rows,features.
- Returns
- -------
- sample_Metadata,feature_table : pd.DataFrame,pd.DataFrame
- Input tables with unshared samples removed and ordered equivalently.
- Raises
- ------
- ValueError
- If no shared samples are found.
- ''''''
- shared_samples = np.intersect1d(sample_Metadata.index, feature_table.index)
- if shared_samples.size == 0:
- raise ValueError(''There are no shared samples between the feature ''
- ''table and the sample Metadata. Ensure that you have ''
- ''passed the correct files.'')
- elif (shared_samples.size == sample_Metadata.shape[0] ==
- feature_table.shape[0]):
- s_Metadata = sample_Metadata.copy()
- s_features = feature_table.copy()
- else:
- s_Metadata = sample_Metadata.loc[np.in1d(sample_Metadata.index,
- shared_samples), :].copy()
- s_features = feature_table.loc[np.in1d(feature_table.index,
- shared_samples), :].copy()
- return s_Metadata, s_features.loc[s_Metadata.index, :]
- def prepare_input(d,q):
- f = np.zeros(d.shape[:2]).astype(''int32'')
- for i in range(d.shape[0]):
- f[i,:] = np.in1d(d[i,:,0],q[i,0])
- return f
- def get_piece_bool(num, dict):
- ''''''Uses a vertex number to find the right bool array
- as created by divide_garment()''''''
- count = 0
- nums = dict[''garment_pieces''][''numbers_array'']
- for i in nums:
- if np.in1d(num, i):
- return count
- count += 1
- def find_linked(ob, vert, per_face=''empty''):
- ''''''Takes a vert and returns an array of linked face indices''''''
- the_coffee_is_hot = True
- fidx = np.arange(len(ob.data.polygons))
- eidx = np.arange(len(ob.data.edges))
- f_set = np.array([])
- e_set = np.array([])
- verts = ob.data.vertices
- verts[vert].select = True
- v_p_f_count = [len(p.vertices) for p in ob.data.polygons]
- max_count = np.max(v_p_f_count)
- if per_face == ''empty'':
- per_face = [[i for i in poly.vertices] for poly in ob.data.polygons]
- for i in per_face:
- for j in range(max_count-len(i)):
- i.append(i[0])
- verts_per_face = np.array(per_face)
- vert=np.array([vert])
- while the_coffee_is_hot:
- booly = np.any(np.in1d(verts_per_face, vert).reshape(verts_per_face.shape), axis=1)
- f_set = np.append(f_set, fidx[booly])
- new_verts = verts_per_face[booly].ravel()
- if len(new_verts) == 0:
- return np.array(f_set, dtype=np.int64)
- cull = np.in1d(new_verts, vert)
- vert = new_verts[-cull]
- verts_per_face = verts_per_face[-booly]
- fidx = fidx[-booly]
- def divide_garment(ob, dict):
- ''''''Creates a set of bool arrays and a set of number arrays
- for indexing a sub set of the uv coords. The nuber arrays can
- be used to look up wich bool array to use based on a vertex number''''''
- if ob == ''empty'':
- ob = bpy.context.object
- #-----------------------------------
- v_count = len(ob.data.vertices)
- idx = np.arange(v_count)
- full_set = np.array([])
- dict[''islands''] = []
- v_list = [[i for i in poly.vertices] for poly in ob.data.polygons]
- v_in_faces = np.hstack(v_list)
- dict[''v_in_faces''] = v_in_faces
- remaining = [1]
- vert = 0
- while len(remaining) > 0:
- linked = find_linked(ob, vert, v_list)
- selected = np.unique(np.hstack(np.array(v_list)[linked]).ravel())
- dict[''islands''].append(selected)
- full_set = np.append(full_set, selected)
- remain_bool = np.in1d(idx, full_set, invert=True)
- remaining = idx[remain_bool]
- if len(remaining) == 0:
- break
- vert = remaining[0]
- #################################
- def setdiff1d(ar1, ar2, assume_unique=False):
- """
- Find the set difference of two arrays.
- Return the sorted,unique values in `ar1` that are not in `ar2`.
- Parameters
- ----------
- ar1 : array_like
- Input array.
- ar2 : array_like
- Input comparison array.
- assume_unique : bool
- If True,the input arrays are both assumed to be unique,which
- can speed up the calculation. Default is False.
- Returns
- -------
- setdiff1d : ndarray
- Sorted 1D array of values in `ar1` that are not in `ar2`.
- See Also
- --------
- numpy.lib.arraysetops : Module with a number of other functions for
- performing set operations on arrays.
- Examples
- --------
- >>> a = np.array([1,2,3,4,1])
- >>> b = np.array([3,5,6])
- >>> np.setdiff1d(a,b)
- array([1,2])
- """
- if assume_unique:
- ar1 = np.asarray(ar1).ravel()
- else:
- ar1 = unique(ar1)
- ar2 = unique(ar2)
- return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
- def set_snapshot_weights(ratio_dict,
- orig_rng,
- eg_range):
- ''''''Determine the job distribution ratios to carry forward during
- the ratio condition application period using actual jobs held ratios.
- likely called at implementation month by main job assignment function
- Count the number of jobs held by each of the ratio groups for each of the
- affected job level numbers. Set the weightings in the distribute function
- accordingly.
- inputs
- ratio_dict (dictionary)
- dictionary containing job levels as keys and ratio groups,
- weightings,month_start and month end as values.
- orig_rng (numpy array)
- month slice of original job array
- eg_range (numpy array)
- month slice of employee group code array
- ''''''
- ratio_dict = copy.deepcopy(ratio_dict)
- job_nums = list(ratio_dict.keys())
- for job in job_nums:
- wgt_list = []
- for ratio_group in ratio_dict[job][0]:
- wgt_list.append(np.count_nonzero((orig_rng == job) &
- (np.in1d(eg_range, ratio_group))))
- ratio_dict[job][1] = tuple(wgt_list)
- return ratio_dict
- # ASSIGN JOBS BY RATIO CONDITION
Numpy in Jupyter errors when printing (Python version 3.8.8): TypeError: 'numpy.ndarray' object is not callable
How to solve: Numpy in Jupyter errors when printing (Python version 3.8.8): TypeError: 'numpy.ndarray' object is not callable?
Good evening, I ran into a numpy problem in Jupyter when trying to print the following, and I got an error. Note that the Python version is 3.8.8. I first tested it with Spyder, where it runs correctly and gives the expected result.
With Spyder:
import numpy as np
for i in range (5):
n = np.random.rand ()
print (n)
Results
0.6604903457995978
0.8236300859753154
0.16067650689842816
0.6967868357083673
0.4231597934445466
Now with Jupyter:
import numpy as np
for i in range (5):
n = np.random.rand ()
print (n)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-78-0c6a801b3ea9> in <module>
2 for i in range (5):
3 n = np.random.rand ()
----> 4 print (n)
TypeError: 'numpy.ndarray' object is not callable
I would appreciate your help with how to solve this in Jupyter.
Thank you very much for your time.
Regards, John
Solution
The source page did not record an effective solution.
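The traceback itself narrows the cause down: print(n) can only raise "TypeError: 'numpy.ndarray' object is not callable" if the name print no longer refers to the built-in function, and the usual way that happens in Jupyter is that an earlier cell assigned an array to print. That is an assumption about this particular notebook, since its earlier cells are not shown; the sketch below reproduces the symptom and shows the fix:

import numpy as np

# Cell 1 (the suspected culprit): an earlier cell rebinds the name `print`
# to an array, shadowing the built-in function.
print = np.random.rand(3)

# Cell 2: `print(n)` now tries to call the array itself, which raises
# "TypeError: 'numpy.ndarray' object is not callable".
try:
    for i in range(5):
        n = np.random.rand()
        print(n)
except TypeError:
    pass  # reproduce the reported error without stopping the script

# Fix: remove the shadowing name (or simply restart the Jupyter kernel).
del print
for i in range(5):
    print(np.random.rand())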
Numpy .in1d method does not correctly evaluate an array against an array view?
How to solve: the Numpy .in1d method does not correctly evaluate an array against an array view?
I am trying to check whether one numpy array appears in another, for debugging purposes.
#Pattern
arr1 = np.array([1.62434536,-0.61175641,-0.52817175])
#type : np.ndarray
#dtype : 'float64'
#shape : (3,)
I then have a list of tuples where the first element of each tuple is an n x m ndarray; call this object "my_nest".
arr2 = my_nest[0][0][0][0:3]
arr2
#array([ 1.62434536, -0.61175641, -0.52817175])
But the in1d method returns an unintuitive result:
np.in1d(arr1,arr2)
#array([False,False,False],dtype=bool)
I know that slicing an ndarray creates a view of the object in memory, but I even tried wrapping it in np.copy to create a new object in memory before comparing, and I still get False.
Does anyone know what is going on here?
Solution
As mentioned in the comments, this is an effect of floating-point precision. For small arrays you can re-implement in1d based on its source, using isclose instead of == for the comparison.
import numpy as np
arr1 = np.array([1.62434536,-0.61175641,-0.52817175])
arr2 = np.array([1.62434536,-0.61175641,-0.52817175+1e-12])
print(arr1)
print(arr2)
print('isin: ',np.in1d(arr1,arr2))
mask = np.zeros(len(arr1),dtype=bool)
for a in arr2:
    mask |= np.isclose(arr1,a)
print('isclose:',mask)
Output:
[ 1.62434536 -0.61175641 -0.52817175]
[ 1.62434536 -0.61175641 -0.52817175]
isin: [ True True False]
isclose: [ True True True]
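As a side note that goes beyond the original answer, the explicit Python loop over arr2 can be replaced with broadcasting. The helper below is an illustrative sketch (isin_tolerant is not a NumPy function; the tolerances are the np.isclose defaults):

import numpy as np

def isin_tolerant(a, b, rtol=1e-05, atol=1e-08):
    """For each element of `a`, report whether it is close to any element of `b`."""
    a = np.asarray(a).ravel()
    b = np.asarray(b).ravel()
    # Compare every element of `a` against every element of `b` in one shot.
    return np.isclose(a[:, None], b[None, :], rtol=rtol, atol=atol).any(axis=1)

arr1 = np.array([1.62434536, -0.61175641, -0.52817175])
arr2 = np.array([1.62434536, -0.61175641, -0.52817175 + 1e-12])
print(np.in1d(arr1, arr2))        # [ True  True False] -- exact equality misses the last value
print(isin_tolerant(arr1, arr2))  # [ True  True  True]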
numpy.random.random & numpy.ndarray.astype & numpy.arange
Today I came across this code:
xb = np.random.random((nb, d)).astype('float32')  # create an nb x d matrix of random numbers
xb[:, 0] += np.arange(nb) / 1000.  # add a value to each number in the first column
To understand these two lines you need to understand three functions.
1. Generating random numbers
numpy.random.random(size=None)
When size is None, it returns a float.
When size is not None, it returns a numpy.ndarray. For example, numpy.random.random((1,2)) returns a numpy array with 1 row and 2 columns.
2. Casting every element of a numpy array to another type
numpy.ndarray.astype(dtype)
Returns a numpy.ndarray. For example, numpy.array([1, 2, 2.5]).astype(int) returns the numpy array [1, 2, 2].
3. Generating an evenly spaced sequence
numpy.arange([start, ]stop, [step, ]dtype=None)
Similar in purpose to Python's built-in range() and to numpy.linspace.
Returns a numpy array. For example, numpy.arange(3) returns the numpy array [0, 1, 2].
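As a quick check of the three functions together, here is a runnable sketch of the two lines quoted at the top of this section; nb and d are arbitrary example sizes, not values from any particular project:

import numpy as np

nb, d = 5, 3                                      # arbitrary sizes for the demo

xb = np.random.random((nb, d)).astype('float32')  # nb x d matrix of floats in [0, 1)
xb[:, 0] += np.arange(nb) / 1000.                 # add 0.000, 0.001, ... to column 0
print(xb)

print(np.random.random())                 # size=None -> a single float
print(np.random.random((1, 2)))           # a 1x2 numpy array
print(np.array([1, 2, 2.5]).astype(int))  # [1 2 2]
print(np.arange(3))                       # [0 1 2]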
numpy.ravel()/numpy.flatten()/numpy.squeeze()
numpy.ravel(a, order='C')
Return a flattened array
numpy.ndarray.flatten(order='C')
Return a copy of the array collapsed into one dimension
numpy.squeeze(a, axis=None)
Remove single-dimensional entries from the shape of an array.
What they share: all three reduce the number of dimensions of an array (ravel and flatten always return a 1-D result; squeeze only removes axes of length 1).
Differences:
ravel() returns a view (when possible), so modifying its elements can change the original array;
flatten() returns a copy, so modifying its elements does not affect the original array;
squeeze() returns a view and only removes the dimensions of size 1 from the shape.
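To make the view-versus-copy distinction concrete before the longer examples below, here is a condensed check (the array values are arbitrary):

import numpy as np

a = np.arange(12).reshape(3, 4)
r = a.ravel()      # view (when the data is contiguous)
f = a.flatten()    # always a copy
r[0] = 100
f[1] = 200
print(a[0, 0], a[0, 1])   # 100 1 -> ravel's change reached a, flatten's did not

b = np.arange(12).reshape(1, 3, 4)
s = b.squeeze()    # drops the length-1 axis -> shape (3, 4), still a view
s[0, 0] = 999
print(b[0, 0, 0])  # 999 -> squeeze also returned a view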
ravel() example:
import matplotlib.pyplot as plt
import numpy as np

def log_type(name, arr):
    print("size of array {}: {}".format(name, arr.size))
    print("shape of array {}: {}".format(name, arr.shape))
    print("ndim of array {}: {}".format(name, arr.ndim))
    print("dtype of array {}: {}".format(name, arr.dtype))
    #print("array: {}".format(arr.data))

a = np.floor(10*np.random.random((3,4)))
print(a)
log_type('a', a)

a1 = a.ravel()
print("a1: {}".format(a1))
log_type('a1', a1)
a1[2] = 100

print(a)
log_type('a', a)
flatten() example:
import matplotlib.pyplot as plt
import numpy as np

def log_type(name, arr):
    print("size of array {}: {}".format(name, arr.size))
    print("shape of array {}: {}".format(name, arr.shape))
    print("ndim of array {}: {}".format(name, arr.ndim))
    print("dtype of array {}: {}".format(name, arr.dtype))
    #print("array: {}".format(arr.data))

a = np.floor(10*np.random.random((3,4)))
print(a)
log_type('a', a)

a1 = a.flatten()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
squeeze() example:
1. The case without single-dimensional entries
import matplotlib.pyplot as plt
import numpy as np

def log_type(name, arr):
    print("size of array {}: {}".format(name, arr.size))
    print("shape of array {}: {}".format(name, arr.shape))
    print("ndim of array {}: {}".format(name, arr.ndim))
    print("dtype of array {}: {}".format(name, arr.dtype))
    #print("array: {}".format(arr.data))

a = np.floor(10*np.random.random((3,4)))
print(a)
log_type('a', a)

a1 = a.squeeze()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
As the output shows, when there are no single-dimensional entries, the array object returned by squeeze() is a view, not a copy.
2. The case with single-dimensional entries
import matplotlib.pyplot as plt
import numpy as np

def log_type(name, arr):
    print("size of array {}: {}".format(name, arr.size))
    print("shape of array {}: {}".format(name, arr.shape))
    print("ndim of array {}: {}".format(name, arr.ndim))
    print("dtype of array {}: {}".format(name, arr.dtype))
    #print("array: {}".format(arr.data))

a = np.floor(10*np.random.random((1,3,4)))
print(a)
log_type('a', a)

a1 = a.squeeze()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
This concludes today's discussion of the Python numpy module in1d() example source code and the numpy module in Python. Thank you for reading. For more on Numpy errors when printing in Jupyter (Python 3.8.8), the Numpy .in1d view-comparison question, numpy.random.random & numpy.ndarray.astype & numpy.arange, and numpy.ravel()/numpy.flatten()/numpy.squeeze(), please search this site.