Want the latest on the Python numpy module's apply_along_axis() example source code? This article lays it out in detail and answers common questions about numpy.apply_along_axis. It also covers: Numpy in Jupyter errors when printing (Python 3.8.8): TypeError: 'numpy.ndarray' object is not callable; Numpy.apply_along_axis works unexpectedly when applying a function with an if/else condition; numpy.random.random & numpy.ndarray.astype & numpy.arange; and numpy.ravel()/numpy.flatten()/numpy.squeeze().
Contents:
- Python numpy module: apply_along_axis() example source code (numpy.apply_along_axis)
- Numpy in Jupyter errors when printing (Python 3.8.8): TypeError: 'numpy.ndarray' object is not callable
- Numpy.apply_along_axis works unexpectedly when applying a function with an if/else condition
- numpy.random.random & numpy.ndarray.astype & numpy.arange
- numpy.ravel()/numpy.flatten()/numpy.squeeze()
Python numpy module: apply_along_axis() example source code (numpy.apply_along_axis)
The following code examples, collected from open-source Python projects, illustrate how numpy.apply_along_axis() is used in practice.
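Before diving into the project code, here is a minimal, self-contained sketch of what np.apply_along_axis does: it slices the input array into 1-D sub-arrays along the given axis and calls the supplied function once per slice, collecting the results.

import numpy as np

# Peak-to-peak range (max minus min) of one 1-D slice.
def ptp(vec):
    return vec.max() - vec.min()

a = np.array([[1, 5, 3],
              [7, 2, 9]])

print(np.apply_along_axis(ptp, 1, a))  # per row:    [4 7]
print(np.apply_along_axis(ptp, 0, a))  # per column: [6 3 6]
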
def _logcdf(self, samples):
    lower = np.full(2, -np.inf)
    upper = norm.ppf(samples)
    limit_flags = np.zeros(2)
    if upper.shape[0] > 0:

        def func1d(upper1d):
            '''
            Calculates the multivariate normal cumulative distribution
            function of a single sample.
            '''
            return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

        vals = np.apply_along_axis(func1d, -1, upper)
    else:
        vals = np.empty((0, ))
    old_settings = np.seterr(divide='ignore')
    vals = np.log(vals)
    np.seterr(**old_settings)
    vals[np.any(samples == 0.0, axis=1)] = -np.inf
    vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
    vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
    return vals

def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanmedian for parameter usage
    """
    if axis is None or a.ndim == 1:
        part = a.ravel()
        if out is None:
            return _nanmedian1d(part, overwrite_input)
        else:
            out[...] = _nanmedian1d(part, overwrite_input)
            return out
    else:
        # for small medians use sort + indexing which is still faster than
        # apply_along_axis
        if a.shape[axis] < 400:
            return _nanmedian_small(a, axis, out, overwrite_input)
        result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
        if out is not None:
            out[...] = result
        return result

def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear', keepdims=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage
    """
    if axis is None:
        part = a.ravel()
        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)
    if out is not None:
        out[...] = result
    return result

def _fix_alpha_channel(self):
    # This is a fix for a bug where the Alpha channel was dropped.
    colors3to4 = [(c[:3], c[3]) for c in self.names.keys()]
    colors3to4 = dict(colors3to4)
    assert(len(colors3to4) == len(self.names))  # Dropped alpha channel causes colors to collide :(
    for lbl in self.labels:
        if lbl is None:
            continue  # No label file created yet.
        img = Image.open(lbl)
        size = img.size
        img = np.array(img)
        if img.shape[2] == 4:
            continue  # Image has alpha channel, good.
        elif img.shape[2] == 3:
            # Lookup each (partial) color and find what its alpha should be.
            alpha = np.apply_along_axis(lambda c: colors3to4[tuple(c)], 2, img)
            data = np.dstack([img, np.array(alpha, dtype=np.uint8)])
            new_img = Image.frombuffer("RGBA", size, data, "raw", "RGBA", 0, 1)
            new_img.save(lbl)
            print("FIXED", lbl)

def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
    x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
    y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
    X, Y = np.meshgrid(x, y)
    Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                           cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
    ax.set_xlabel('Position')
    ax.set_ylabel('Velocity')
    ax.set_zlabel('Value')
    ax.set_title("Mountain \"Cost To Go\" Function")
    fig.colorbar(surf)
    plt.show()

def boundary_tree_to_image(boundary_tree, image_mesh, size):
    # NOTE: the size argument of Image.frombytes was lost in the scraped
    # source; it is reintroduced here as a parameter (a (width, height) tuple).
    arr = array('B')
    np.apply_along_axis(lambda c: arr.extend(boundary_tree.query(c)), 1, image_mesh)
    return Image.frombytes("RGB", size, arr)

def __query_by_committee(self, clf, X_unlabeled):
    num_classes = len(clf[0].classes_)
    C = len(clf)
    preds = []
    if self.strategy == 'vote_entropy':
        for model in clf:
            y_out = map(int, model.predict(X_unlabeled))
            preds.append(np.eye(num_classes)[y_out])
        votes = np.apply_along_axis(np.sum, 0, np.stack(preds)) / C
        return np.apply_along_axis(entropy, 1, votes)
    elif self.strategy == 'average_kl_divergence':
        for model in clf:
            preds.append(model.predict_proba(X_unlabeled))
        consensus = np.mean(np.stack(preds), axis=0)
        divergence = []
        for y_out in preds:
            divergence.append(entropy(consensus.T, y_out.T))
        return np.apply_along_axis(np.mean, 0, np.stack(divergence))

def estimate_1hot_cost(X, is_categorical):
    """
    Calculate the "memory expansion" after applying one-hot encoding.
    :param X: array-like
        The input data array
    :param is_categorical: boolean array-like
        Array of vector form that indicates
        whether each feature of X is categorical
    :return: int
        Calculated memory size in byte scale (expansion)
    """
    n_columns = 0
    count_labels_v = lambda v: np.sum(np.isfinite(np.unique(v))) - 1
    n_labels = np.apply_along_axis(count_labels_v, 0, X)
    n_columns += np.sum(n_labels[is_categorical])
    estimated_memory = n_columns * X.shape[0] * X.dtype.itemsize
    return estimated_memory

def load_dataset():
    if not os.path.exists("./dataset/training.csv"):
        print("dataset does not exist")
        raise Exception
    # load dataset
    labeled_image = pd.read_csv("./dataset/training.csv")
    # preprocessing dataframe
    image = np.array(labeled_image["Image"].values).reshape(-1, 1)
    image = np.apply_along_axis(lambda img: img[0].split(), 1, image)
    image = image.astype(np.int32)  # because train_img elements are strings before preprocessing
    image = image.reshape(-1, 96*96)  # each row is a 96 * 96 image
    label = labeled_image.values[:, :-1]
    label = label.astype(np.float32)
    # replace nan values with the column mean
    col_mean = np.nanmean(label, axis=0)
    indices = np.where(np.isnan(label))
    label[indices] = np.take(col_mean, indices[1])
    return image, label

def get_his_std_qi(data_pixel_qi, max_cts=None):
    '''
    YG. Dev 16, 2016
    Calculate the photon histogram for one q by giving
    Parameters:
        data_pixel_qi: one-D array, for the photon counts
        max_cts: for bin max, bin will be [0, 1, 2, ..., max_cts]
    Return:
        bins
        his
        std
    '''
    if max_cts is None:
        max_cts = np.max(data_pixel_qi) + 1
    bins = np.arange(max_cts)
    dqn, dqm = data_pixel_qi.shape
    # get histogram here
    H = np.apply_along_axis(np.bincount, 1, np.int_(data_pixel_qi), minlength=max_cts) / dqm
    # do average for different frame
    his = np.average(H, axis=0)
    std = np.std(H, axis=0)
    # cal average photon counts
    kmean = np.average(data_pixel_qi)
    return bins, his, std, kmean

def punion(probs, axis=None):
    """Find the unions of a given list of probabilities assuming independence.
    Args:
        probs: Matrix-like probabilities to union.
        axis: Axis along which union will be performed.
    Returns:
        Matrix of probability unions.
    """
    def punion1d(probs):
        """Union for 1d array."""
        finalp = 0.0
        for p in probs:
            finalp += p * (1.0 - finalp)
        return finalp
    probs = np.asarray(probs)
    if axis is None:
        return punion1d(probs.reshape((-1,)))
    else:
        return np.apply_along_axis(func1d=punion1d, axis=axis, arr=probs)

def addFamily(X):
    # Family size: index 8
    newCol = np.array(X[:, 1] + X[:, 2], np.newaxis)
    newCol = newCol.reshape((len(newCol), 1))
    X = np.hstack((X, newCol))
    # Family category: index 9
    def determineFamilyCat(row):
        # print('row shape = {}, cont = {}'.format(row.shape, row))
        if row[8] == 1:
            return 0  # singles
        elif 2 <= row[8] <= 4:
            return 1  # normal size
        else:
            return 2  # large size
    newCol = np.apply_along_axis(determineFamilyCat, 1, X)
    newCol = newCol.reshape((len(newCol), 1))
    X = np.hstack((X, newCol))
    return X

# Not used
def update_recover_maps(self):
    max_distance = min(self.width // 2, self.height // 2, 15)
    #self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
    #self.recover_map[0] = np.divide(self.strength_map, self.production_map_01) * (self.is_neutral_map - self.combat_zone_map)
    self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
    #self.prod_over_str_map[0] = np.divide(self.production_map, self.strength_map_01) * (self.is_neutral_map - self.combat_zone_map)
    new_str_map = np.copy(self.strength_map)
    new_str_map[new_str_map == 0] = 2
    self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
    #self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)
    for distance in range(1, max_distance + 1):
        self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
        self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
        self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
        #self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)
    self.prod_over_str_max_map = np.apply_along_axis(np.max, 0, self.prod_over_str_map)
    #self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
    self.prod_over_str_avg_map = np.apply_along_axis(np.mean, 0, self.prod_over_str_map)
    #self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
    self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
    self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

def match_matrix(event: Event):
    """Returns a numpy participation matrix for the qualification matches in this event, used for calculating OPR.
    Each row in the matrix corresponds to a single alliance in a match, meaning that there will be two rows (one for
    red, one for blue) per match. Each column represents a single team, ordered by team number. If a team participated
    on a certain alliance, the value at that row and column would be 1, otherwise, it would be 0. For example, an
    event with teams 1-7 that featured a match that pitted teams 1, 3, and 5 against 2, 4, and 6 would have a match
    matrix that looks like this (sans labels):
             #1 #2 #3 #4 #5 #6 #7
    qm1_red   1  0  1  0  1  0  0
    qm1_blue  0  1  0  1  0  1  0
    """
    match_list = []
    for match in filter(lambda match: match['comp_level'] == 'qm', event.matches):
        matchRow = []
        for team in event.teams:
            matchRow.append(1 if team['key'] in match['alliances']['red']['teams'] else 0)
        match_list.append(matchRow)
        matchRow = []
        for team in event.teams:
            matchRow.append(1 if team['key'] in match['alliances']['blue']['teams'] else 0)
        match_list.append(matchRow)
    mat = numpy.array(match_list)
    sum_matches = numpy.sum(mat, axis=0)
    avg_team_matches = sum(sum_matches) / float(len(sum_matches))
    return mat[:, numpy.apply_along_axis(numpy.count_nonzero, 0, mat) > avg_team_matches - 2]

def cluster_words(words, service_name, size):
    stopwords = ["GET", "POST", "total", "http-requests", "-", "_"]
    cleaned_words = []
    for word in words:
        for stopword in stopwords:
            word = word.replace(stopword, "")
        cleaned_words.append(word)
    def distance(coord):
        i, j = coord
        return 1 - jaro_distance(cleaned_words[i], cleaned_words[j])
    indices = np.triu_indices(len(words), 1)
    distances = np.apply_along_axis(distance, 0, indices)
    return cluster_of_size(linkage(distances), size)

def permute_rows(seed, array):
    """
    Shuffle each row in ``array`` based on permutations generated by ``seed``.
    Parameters
    ----------
    seed : int
        Seed for numpy.RandomState
    array : np.ndarray[ndim=2]
        Array over which to apply permutations.
    """
    rand = np.random.RandomState(seed)
    return np.apply_along_axis(rand.permutation, 1, array)

def __detect_Now(self, spike_waveforms, selectChan, current_page):
    if selectChan + "_" + str(current_page) in self.windowsstate:
        use_shape0 = self.__pk0_roi0_pos(selectChan, current_page)
        spk_in_line = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms,
                                          use_shape0[0], use_shape0[1])
        use_shape1 = self.__pk0_roi1_pos(selectChan, current_page)
        spk_in_line1 = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms,
                                           use_shape1[0], use_shape1[1])
        detected_mask = spk_in_line & spk_in_line1
    else:
        detected_mask = np.ones(spike_waveforms.shape[0], dtype=bool)
    return detected_mask

# check whether a spike's waveform intersects with the segment widget
def __in_select_line(self, temp_spike, pos_1, pos_2):
    pos_3y = temp_spike[:-1]
    pos_3x = np.ones(pos_3y.shape, dtype=np.int32) * np.arange(pos_3y.shape[0])
    pos_4y = temp_spike[1:]
    pos_4x = np.ones(pos_3y.shape, dtype=np.int32) * np.arange(1, pos_3y.shape[0] + 1)
    pos_3_4 = np.vstack([pos_3x, pos_3y, pos_4x, pos_4y]).T
    is_insect = np.apply_along_axis(self.__intersect, 1, pos_3_4, pos_1, pos_2)
    return np.any(is_insect)

def __indexs_select_pk0(self, pk0_roi0_h0, pk0_roi0_h1, pk0_roi1_h0, pk0_roi1_h1):
    # get indexes of selected waveforms in pk0
    spk_in_line = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                      pk0_roi0_h0, pk0_roi0_h1)
    changed_index = np.where(spk_in_line == True)[0]
    changed_index = np.array(changed_index, dtype=np.int32)
    spk_in_line1 = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                       pk0_roi1_h0, pk0_roi1_h1)
    changed_index1 = np.where(spk_in_line1 == True)[0]
    changed_index1 = np.array(changed_index1, dtype=np.int32)
    changed_index = np.intersect1d(changed_index, changed_index1)
    return changed_index + self.indexs_pk0[0]

def __indexs_select_pk2(self, pk2_roi_pos):
    x_min = pk2_roi_pos[:, 0].min()
    x_max = pk2_roi_pos[:, 0].max()
    y_min = pk2_roi_pos[:, 1].min()
    y_max = pk2_roi_pos[:, 1].max()
    pca_1, pca_2 = self.PCAusedList.currentText().split("-")
    pca_1 = np.int(pca_1) - 1
    pca_2 = np.int(pca_2) - 1
    x = np.logical_and(self.wavePCAs[:, pca_1] > x_min,
                       self.wavePCAs[:, pca_1] < x_max)
    y = np.logical_and(self.wavePCAs[:, pca_2] > y_min,
                       self.wavePCAs[:, pca_2] < y_max)
    ind_0 = np.logical_and(x, y)
    ind_0 = np.where(ind_0 == True)[0]
    ind_0 = np.array(ind_0, dtype=np.int32)
    if ind_0.shape[0] > 0:
        segments = []
        for i in range(pk2_roi_pos.shape[0] - 1):
            segments.append([pk2_roi_pos[i], pk2_roi_pos[i + 1]])
        segments.append([pk2_roi_pos[-1], pk2_roi_pos[0]])
        segments = np.array(segments)
        temp_pcas = self.wavePCAs[ind_0]
        temp_pcas = temp_pcas[:, [pca_1, pca_2]]
        is_intersect = np.apply_along_axis(self.__intersect_roi2, 1, temp_pcas, segments)
        return ind_0[is_intersect]
    else:
        return np.array([], dtype=np.int32)

def normalize_simple(matrix, mask):
    """normalizes a matrix by columns, and then by rows. With multiple
    time-series, the data are normalized to the within-series total, not the
    entire data set total.
    Parameters
    ----------
    matrix: np.matrix
        Time-series matrix of abundance counts. Rows are sequences, columns
        are samples/time-points.
    mask: list or np.array
        List of objects with length matching the number of timepoints, where
        unique values delineate multiple time-series. If there is only one
        time-series in the data set, it's a list of identical objects.
    Returns
    -------
    normal_matrix: np.matrix
        Matrix where the columns (within-sample) have been converted to
        proportions, then the rows are normalized to sum to 1.
    """
    normal_matrix = matrix / matrix.sum(0)
    normal_matrix[np.invert(np.isfinite(normal_matrix))] = 0
    for mask_val in np.unique(mask):
        y = normal_matrix[:, np.where(mask == mask_val)[0]]
        y = np.apply_along_axis(zscore, 1, y)
        normal_matrix[:, np.where(mask == mask_val)[0]] = y
        del y
    return normal_matrix

def feat_eeg(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz
    """
    if signals.ndim == 1:
        signals = np.expand_dims(signals, 0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0], 9), dtype='float32')
    # 5 features for the frequency bands
    w = (fft(signals, axis=1)).real
    delta = np.sum(np.abs(w[:, np.arange(0.5*nsamp/sfreq, 4*nsamp/sfreq, dtype=int)]), axis=1)
    theta = np.sum(np.abs(w[:, np.arange(4*nsamp/sfreq, 8*nsamp/sfreq, dtype=int)]), axis=1)
    alpha = np.sum(np.abs(w[:, np.arange(8*nsamp/sfreq, 13*nsamp/sfreq, dtype=int)]), axis=1)
    beta = np.sum(np.abs(w[:, np.arange(13*nsamp/sfreq, 20*nsamp/sfreq, dtype=int)]), axis=1)
    gamma = np.sum(np.abs(w[:, np.arange(20*nsamp/sfreq, 50*nsamp/sfreq, dtype=int)]), axis=1)  # only until 50, because hz=100
    spindle = np.sum(np.abs(w[:, np.arange(12*nsamp/sfreq, 14*nsamp/sfreq, dtype=int)]), axis=1)
    sum_abs_pow = delta + theta + alpha + beta + gamma + spindle
    feats[:, 0] = delta / sum_abs_pow
    feats[:, 1] = theta / sum_abs_pow
    feats[:, 2] = alpha / sum_abs_pow
    feats[:, 3] = beta / sum_abs_pow
    feats[:, 4] = gamma / sum_abs_pow
    feats[:, 5] = spindle / sum_abs_pow
    feats[:, 6] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))  # kurtosis
    feats[:, 7] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # entropy.. yay, one line...
    feats[:, 8] = np.dot(np.array([3.5, 4, 5, 7, 30]), feats[:, 0:5].T) / (sfreq/2 - 0.5)
    if np.any(feats == np.nan):
        print('NaN detected')
    return np.nan_to_num(feats)

def feat_wavelet(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz
    """
    # band powers delta..gamma are computed as in feat_eeg above
    # (this part of the listing was truncated in the source)
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:, 5] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))  # kurtosis
    feats[:, 6] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # entropy
    feats[:, 7] = np.dot(np.array([3.5, 4, 5, 7, 30]), feats[:, 0:5].T) / (sfreq/2 - 0.5)
    if np.any(feats == np.nan):
        print('NaN detected')
    return np.nan_to_num(feats)

def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals
    """
    if signals.ndim == 1:
        signals = np.expand_dims(signals, 0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals, axis=1)).real
    feats = np.zeros((signals.shape[0], 15), dtype='float32')
    # band power features feats[:, 0:5] as in feat_eeg above (truncated in the original listing)
    feats[:, 5] = np.dot(np.array([3.5, 4, 5, 7, 30]), feats[:, 0:5].T) / (sfreq/2 - 0.5)  # smean
    feats[:, 6] = np.sqrt(np.max(signals, axis=1))  # PAV
    feats[:, 7] = np.sqrt(np.abs(np.min(signals, axis=1)))  # VAV
    feats[:, 8] = np.argmax(signals, axis=1) / nsamp  # PAP
    feats[:, 9] = np.argmin(signals, axis=1) / nsamp  # VAP
    feats[:, 10] = np.sqrt(np.sum(np.abs(signals), axis=1) / np.mean(np.sum(np.abs(signals), axis=1)))  # AUC
    feats[:, 11] = np.sum(((np.roll(np.sign(signals), 1, axis=1) - np.sign(signals)) != 0).astype(int), axis=1) / nsamp  # TVC
    feats[:, 12] = np.log10(np.std(signals, axis=1))  # STD/VAR
    feats[:, 13] = np.log10(stats.kurtosis(signals, axis=1))  # kurtosis
    feats[:, 14] = np.log10(-np.sum([(x/nsamp)*(np.log((x+np.spacing(1))/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # entropy.. yay, one line...
    if np.any(feats == np.nan):
        print('NaN detected')
    return np.nan_to_num(feats)

def feat_emg(signals):
    """
    calculate the EMG median as defined by Leangkvist (2012),
    """
    if signals.ndim == 1:
        signals = np.expand_dims(signals, 0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals, axis=1)).real
    feats = np.zeros((signals.shape[0], 13), dtype='float32')
    # band power features feats[:, 0:5] and feats[:, 5] (smean) as above (truncated in the original listing)
    emg = np.sum(np.abs(w[:, np.arange(12.5*nsamp/sfreq, 32*nsamp/sfreq, dtype=int)]), axis=1)
    feats[:, 6] = emg / np.sum(np.abs(w), axis=1)  # ratio of high freq to total motor
    feats[:, 7] = np.median(np.abs(w), axis=1)  # median freq
    feats[:, 8] = np.mean(np.abs(w), axis=1)  # mean freq
    feats[:, 9] = np.std(signals, axis=1)  # std
    feats[:, 10] = np.mean(signals, axis=1)
    feats[:, 11] = np.log10(stats.kurtosis(signals, axis=1))
    feats[:, 12] = np.log10(-np.sum([(x/nsamp)*(np.log((x+np.spacing(1))/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # entropy
    if np.any(feats == np.nan):
        print('NaN detected')
    return np.nan_to_num(feats)

def jaccard(inclusion):
    """Calculate jaccard distances for a community."""
    logger.info("calculating jaccard distance for {}x{} input matrix".format(
        *inclusion.shape))
    jaccard = np.apply_along_axis(
        lambda a: (a & inclusion).sum(1), 1, inclusion)
    jaccard = jaccard / np.apply_along_axis(
        lambda a: (a | inclusion).sum(1), 1, inclusion)
    return 1 - jaccard

def euclidean(inclusion):
    """Calculate euclidean distances for a community."""
    logger.info("calculating euclidean distance for {}x{} input matrix".format(
        *inclusion.shape))
    euclidean = np.apply_along_axis(
        lambda a: ((a - inclusion) ** 2).sum(1), 1, inclusion)
    return np.sqrt(euclidean)

def calc_pairwise_cosine(model):
    n = model.num_topics
    weights = model.state.get_lambda()
    weights = np.apply_along_axis(lambda x: x / x.sum(), 1, weights)  # get dist.
    weights = unitmatrix(weights)  # normalize
    score = []
    for i in range(n):
        for j in range(i + 1, n):
            score.append(np.arccos(weights[i].dot(weights[j])))
    return np.mean(score), np.std(score)

def calc_pairwise_dev(model):
    # the average squared deviation from 0 (90 degree)
    n = model.num_topics
    weights = model.state.get_lambda()
    weights = np.apply_along_axis(lambda x: x / x.sum(), 1, weights)  # get dist.
    weights = unitmatrix(weights)  # normalize
    score = 0.
    for i in range(n):
        for j in range(i + 1, n):
            score += (weights[i].dot(weights[j]))**2
    return np.sqrt(2. * score / n / (n - 1))

def decode(self, X, mode='argmax'):
    if mode == 'argmax':
        X = X.argmax(axis=-1)
    elif mode == 'choice':
        X = np.apply_along_axis(lambda vec:
                                np.random.choice(len(vec),
                                                 p=(vec / np.sum(vec))),
                                axis=-1, arr=X).ravel()
    return str.join('', (self.indices_char[x] for x in X))

def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
    """
    sort + indexing median, faster for small medians along multiple
    dimensions due to the high overhead of apply_along_axis
    see nanmedian for parameter usage
    """
    a = np.ma.masked_array(a, np.isnan(a))
    m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
    for i in range(np.count_nonzero(m.mask.ravel())):
        warnings.warn("All-NaN slice encountered", RuntimeWarning)
    if out is not None:
        out[...] = m.filled(np.nan)
        return out
    return m.filled(np.nan)

Numpy in Jupyter errors when printing (Python 3.8.8): TypeError: 'numpy.ndarray' object is not callable
How to solve "Numpy in Jupyter errors when printing (Python 3.8.8): TypeError: 'numpy.ndarray' object is not callable"?
Good evening. When trying to print the following, I ran into a numpy problem in Jupyter and got an error. Note that the Python version is 3.8.8. I first tested it with Spyder, where it ran correctly and gave me the expected results.
Using Spyder:
import numpy as np
for i in range (5):
    n = np.random.rand ()
    print (n)
Results:
0.6604903457995978
0.8236300859753154
0.16067650689842816
0.6967868357083673
0.4231597934445466
Now with Jupyter:
import numpy as np
for i in range (5):
    n = np.random.rand ()
    print (n)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-78-0c6a801b3ea9> in <module>
      2 for i in range (5):
      3     n = np.random.rand ()
----> 4     print (n)
TypeError: 'numpy.ndarray' object is not callable
I would appreciate any help on how to fix this in Jupyter.
Thank you very much for your time.
Regards, John
Solution
The original thread records no accepted fix, but the traceback itself points at the cause: at line 4 the name print no longer refers to the builtin function but to a numpy.ndarray, which means an earlier cell in the Jupyter session rebound it (for example, an assignment like print = some_array). Restarting the kernel, or deleting the shadowing name with del print, restores the builtin, and the loop then runs exactly as it does in Spyder.
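The scenario can be reproduced deliberately; here is a minimal sketch of the shadowing (the assignment is an assumption about what happened earlier in the asker's session, not taken from the post):

import numpy as np

print = np.random.rand(3)       # accidentally rebinds the builtin name to an array

try:
    print(0.5)                  # now "calls" the array
except TypeError:
    pass                        # TypeError: 'numpy.ndarray' object is not callable

del print                       # remove the shadowing name; the builtin is visible again
print("print works again")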
Numpy.apply_along_axis works unexpectedly when applying a function with an if/else condition
How to solve "Numpy.apply_along_axis works unexpectedly when applying a function with an if/else condition"?
I am getting a result I cannot explain. The code below reproduces it in the simplest way (f is just a test function):
# returns absolute difference between last and first element in an array
def f(arr):
    return 0 if arr[-1] == arr[0] else abs(arr[-1] - arr[0])

def test_vectorized(test_arr, window=2):
    T = test_arr.shape[0]
    # create sliding windows
    slide_windows = np.expand_dims(np.arange(window + 1), axis=0) + \
                    np.expand_dims(np.arange(T - window), axis=0).T
    print(slide_windows)
    slide_values = test_arr[slide_windows]
    print(slide_values)
    # apply function to each sliding window
    return np.apply_along_axis(f, axis=1, arr=slide_values)

# testing
test_arr = np.array([27.75, 27.71, 28.05, 27.75, 26.55, 27.18])
test_vectorized(test_arr, window=3)
# Output
[[0 1 2 3]
 [1 2 3 4]
 [2 3 4 5]]
[[27.75 27.71 28.05 27.75]
 [27.71 28.05 27.75 26.55]
 [28.05 27.75 26.55 27.18]]
Out[238]:
array([0, 1, 0])
The code should return array([0, 1.16, 0.87]), i.e. the absolute difference between the first and last element of each sliding window.
I am using a Jupyter notebook with Python 3.8.2. I have spent more than an hour debugging, but the code itself seems fine. Can anyone help? Much appreciated.
Solution
Your function f returns integers. apply_along_axis infers the output dtype from the first slice's result: the first window ([27.75, 27.71, 28.05, 27.75]) has equal first and last elements, so f returns the integer 0, the whole output array becomes an integer array, and the later values 1.16 and 0.87 are truncated to 1 and 0. You have to make f return a float:
def f(arr):
    return float(0 if arr[-1] == arr[0] else abs(arr[-1] - arr[0]))
Which gives:
[[0 1 2 3]
 [1 2 3 4]
 [2 3 4 5]]
[[27.75 27.71 28.05 27.75]
 [27.71 28.05 27.75 26.55]
 [28.05 27.75 26.55 27.18]]
[0.   1.16 0.87]
P.S. Your function f can be simplified to return abs(arr[-1] - arr[0]), since that expression already covers the 0 case; you don't need the if/else statement at all.
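To make the dtype pitfall easy to see in isolation, here is a small sketch (the window values are copied from the question; the names f_int and f_float are ours):

import numpy as np

def f_int(arr):
    # first slice returns the int 0 -> the whole output becomes an int array
    return 0 if arr[-1] == arr[0] else abs(arr[-1] - arr[0])

def f_float(arr):
    return float(0 if arr[-1] == arr[0] else abs(arr[-1] - arr[0]))

windows = np.array([[27.75, 27.71, 28.05, 27.75],
                    [27.71, 28.05, 27.75, 26.55],
                    [28.05, 27.75, 26.55, 27.18]])

print(np.apply_along_axis(f_int, 1, windows))    # [0 1 0]  (values truncated to int)
print(np.apply_along_axis(f_float, 1, windows))  # [0.   1.16 0.87]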
numpy.random.random & numpy.ndarray.astype & numpy.arange
Today I came across these two lines of code:
xb = np.random.random((nb, d)).astype('float32')  # create a 2-D random matrix (nb rows, d columns)
xb[:, 0] += np.arange(nb) / 1000.                 # add an offset to every value in the first column
Understanding them requires three functions:
1. Generating random numbers
numpy.random.random(size=None)
When size is None, it returns a float.
When size is not None, it returns a numpy.ndarray. For example, numpy.random.random((1, 2)) returns a 1x2 numpy array.
2. Casting every element of a numpy array to another type
numpy.ndarray.astype(dtype)
Returns a numpy.ndarray. For example, numpy.array([1, 2, 2.5]).astype(int) returns the numpy array [1, 2, 2].
3. Generating an arithmetic sequence
numpy.arange([start, ]stop, [step, ]dtype=None)
Works like Python's built-in range() and numpy's numpy.linspace.
Returns a numpy array. For example, numpy.arange(3) returns the numpy array [0, 1, 2].
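A short, runnable sketch tying the three functions together (the values of nb and d are arbitrary choices for this demo):

import numpy as np

nb, d = 5, 3                                      # arbitrary demo sizes
xb = np.random.random((nb, d)).astype('float32')  # nb x d matrix of floats in [0, 1)
xb[:, 0] += np.arange(nb) / 1000.                 # adds 0.000, 0.001, ..., 0.004 to column 0

print(np.random.random())                  # size=None -> a single float
print(np.random.random((1, 2)))            # 1x2 numpy array
print(np.array([1, 2, 2.5]).astype(int))   # [1 2 2]
print(np.arange(3))                        # [0 1 2]
print(xb[:, 0])                            # first column, offset row by row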
numpy.ravel()/numpy.flatten()/numpy.squeeze()
numpy.ravel(a, order='C')
    Return a flattened array.
numpy.ndarray.flatten(order='C')
    Return a copy of the array collapsed into one dimension.
numpy.squeeze(a, axis=None)
    Remove single-dimensional entries from the shape of an array.
What they have in common: each one reduces the number of dimensions of an array.
How they differ:
ravel() returns a view whenever possible, so changing an element of the result usually changes the original array too;
flatten() always returns a copy, so changing an element of the result never affects the original array;
squeeze() returns a view in which only the dimensions of size 1 have been removed from the shape.
ravel() example:
import numpy as np

def log_type(name, arr):
    print("array {} size: {}".format(name, arr.size))
    print("array {} shape: {}".format(name, arr.shape))
    print("array {} ndim: {}".format(name, arr.ndim))
    print("array {} dtype: {}".format(name, arr.dtype))

a = np.floor(10 * np.random.random((3, 4)))
print(a)
log_type('a', a)

a1 = a.ravel()
print("a1: {}".format(a1))
log_type('a1', a1)
a1[2] = 100

print(a)
log_type('a', a)
flatten() example:
import numpy as np

def log_type(name, arr):
    print("array {} size: {}".format(name, arr.size))
    print("array {} shape: {}".format(name, arr.shape))
    print("array {} ndim: {}".format(name, arr.ndim))
    print("array {} dtype: {}".format(name, arr.dtype))

a = np.floor(10 * np.random.random((3, 4)))
print(a)
log_type('a', a)

a1 = a.flatten()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
squeeze() example:
Case 1: no single-dimensional entries
import numpy as np

def log_type(name, arr):
    print("array {} size: {}".format(name, arr.size))
    print("array {} shape: {}".format(name, arr.shape))
    print("array {} ndim: {}".format(name, arr.ndim))
    print("array {} dtype: {}".format(name, arr.dtype))

a = np.floor(10 * np.random.random((3, 4)))
print(a)
log_type('a', a)

a1 = a.squeeze()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
The result shows that even when there are no single-dimensional entries, squeeze() returns a view of the array, not a copy.
Case 2: with single-dimensional entries
import numpy as np

def log_type(name, arr):
    print("array {} size: {}".format(name, arr.size))
    print("array {} shape: {}".format(name, arr.shape))
    print("array {} ndim: {}".format(name, arr.ndim))
    print("array {} dtype: {}".format(name, arr.dtype))

a = np.floor(10 * np.random.random((1, 3, 4)))
print(a)
log_type('a', a)

a1 = a.squeeze()
print("a1 before modification: {}".format(a1))
log_type('a1', a1)
a1[2] = 100
print("a1 after modification: {}".format(a1))

print("a: {}".format(a))
log_type('a', a)
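The view-vs-copy behaviour described above can also be checked compactly; a minimal sketch:

import numpy as np

a = np.zeros((2, 3))
r = a.ravel()                      # view (a is C-contiguous)
f = a.flatten()                    # always a copy
s = np.zeros((1, 2, 3)).squeeze()  # view with the size-1 axis removed

r[0] = 1.0
assert a[0, 0] == 1.0              # ravel: the change shows up in a
f[1] = 2.0
assert a[0, 1] == 0.0              # flatten: a is untouched
assert s.shape == (2, 3)           # squeeze: only size-1 dims removed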
That wraps up the Python numpy module's apply_along_axis() example source code and numpy.apply_along_axis. Thanks for taking the time to read; for more on Numpy in Jupyter erroring when printing (Python 3.8.8): TypeError: 'numpy.ndarray' object is not callable, Numpy.apply_along_axis working unexpectedly with if/else conditions, numpy.random.random & numpy.ndarray.astype & numpy.arange, and numpy.ravel()/numpy.flatten()/numpy.squeeze(), keep browsing this site.