Website performance: "performance" is a word that covers a very broad area. It has its own objective metrics, but it also involves the user's subjective perception, and different parties focus on different things depending on their viewpoint. The main topic of this part is how to build a high-performance website: by analysing the optimization measures available at each layer of a site, targeted optimization can be carried out on the basis of performance testing.

import math
import random
import sys
from operator import itemgetter

# Module-level state used by the snippet below:
# matrix collects the imdbIds recommended for the current user,
# user_sim_mat is the user-user similarity matrix filled by calc_user_sim().
matrix = []
matrix2 = []        # unused in this snippet
user_sim_mat = {}


class UserBasedCF(object):
    ''' TopN recommendation - User Based Collaborative Filtering '''

    def __init__(self):
        self.trainset = {}      # training set
        self.testset = {}       # test set
        self.initialset = {}    # ratings of the user we want recommendations for
        self.n_sim_user = 30    # K: number of similar users to consider
        self.n_rec_movie = 10   # N: number of movies to recommend
        self.movie_popular = {}
        self.movie_count = 0    # total number of movies
        print('Similar user number = %d' % self.n_sim_user, file=sys.stderr)
        print('recommended movie number = %d' % self.n_rec_movie, file=sys.stderr)

    @staticmethod
    def loadfile(filename):
        ''' load a file, return a generator. '''
        fp = open(filename, 'r', encoding='UTF-8')
        for i, line in enumerate(fp):
            yield line.strip('\r\n')
            # if i % 100000 == 0:
            #     print('loading %s(%s)' % (filename, i), file=sys.stderr)
        fp.close()
        print('load %s success' % filename, file=sys.stderr)

    def initial_dataset(self, filename1):
        ''' load the ratings of the target user ("user,movie,rating" per line). '''
        initialset_len = 0
        for lines in self.loadfile(filename1):
            users, movies, ratings = lines.split(',')
            self.initialset.setdefault(users, {})
            self.initialset[users][movies] = ratings
            initialset_len += 1

    def generate_dataset(self, filename2, pivot=1.0):
        ''' load rating data and split it into a training set and a test set '''
        trainset_len = 0
        testset_len = 0
        for line in self.loadfile(filename2):
            # user, movie, rating, _ = line.split('::')
            user, movie, rating = line.split(',')
            # split the data by pivot; pivot=0.7 gives roughly a 7:3 train/test split
            if random.random() < pivot:
                self.trainset.setdefault(user, {})
                # trainset[user][movie] holds the user's rating for the movie
                self.trainset[user][movie] = rating
                trainset_len += 1
            else:
                self.testset.setdefault(user, {})
                self.testset[user][movie] = rating
                testset_len += 1
        print('split training set and test set succ', file=sys.stderr)
        print('train set = %s' % trainset_len, file=sys.stderr)
        print('test set = %s' % testset_len, file=sys.stderr)

    def calc_user_sim(self):
        ''' build the user-user similarity matrix from the training set. '''
        # inverse table: movie -> set of users who rated it
        movie2users = dict()
        for user, movies in self.trainset.items():
            for movie in movies:
                if movie not in movie2users:
                    movie2users[movie] = set()
                movie2users[movie].add(user)
                # count item popularity at the same time
                if movie not in self.movie_popular:
                    self.movie_popular[movie] = 0
                self.movie_popular[movie] += 1

        # save the total movie number, which will be used in evaluation
        self.movie_count = len(movie2users)
        print('total movie number = %d' % self.movie_count, file=sys.stderr)

        # count co-rated movies between users; each shared movie contributes
        # 1 / log(1 + |users who rated it|), so popular movies are down-weighted
        usersim_mat = user_sim_mat
        for movie, users in movie2users.items():
            for u in users:
                for v in users:
                    if u == v:
                        continue
                    usersim_mat.setdefault(u, {})
                    usersim_mat[u].setdefault(v, 0)
                    usersim_mat[u][v] += 1 / math.log(1 + len(users))

        # normalize into the final similarity:
        # sim(u, v) = sum over co-rated movies of 1/log(1+|N(i)|),
        #             divided by sqrt(|N(u)| * |N(v)|)
        simfactor_count = 0
        PRINT_STEP = 20000  # left over from progress printing, unused here
        for u, related_users in usersim_mat.items():
            for v, count in related_users.items():
                usersim_mat[u][v] = count / math.sqrt(
                    len(self.trainset[u]) * len(self.trainset[v]))
                simfactor_count += 1

    def recommend(self, user):
        ''' Find K similar users and recommend N movies. '''
        matrix.clear()          # reset the recommendation list on every call
        K = self.n_sim_user     # number of similar users (30 here)
        N = self.n_rec_movie    # number of movies to recommend (10 here)
        rank = dict()           # predicted interest of the user in each movie
        # movies the user has already seen (training set only)
        # NOTE: eventually this should come from initialset instead of the training set
        # watched_movies = self.initialset[user]
        watched_movies = self.trainset[user]
        # sort neighbours by similarity (field 1) in descending order and keep the top K
        for similar_user, similarity_factor in sorted(user_sim_mat[user].items(),
                                                      key=itemgetter(1), reverse=True)[0:K]:
            for imdbid in self.trainset[similar_user]:
                if imdbid in watched_movies:
                    continue    # skip movies the user has already seen
                # predict the user's "interest" in each movie by summing the
                # similarities of the neighbours who rated it
                rank.setdefault(imdbid, 0)
                rank[imdbid] += similarity_factor
        # keep the N best movies; sorted() turns the dict into a list of pairs
        rank_ = sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]
        for key, value in rank_:
            matrix.append(key)  # matrix stores the recommended imdbIds
        print(matrix)
        return matrix
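The class above is never instantiated in the snippet, so here is a minimal driver sketch showing how the pieces are meant to fit together. The file name 'ratings.csv', the "user,movie,rating" CSV layout and the user id '1' are placeholders assumed for illustration; only generate_dataset, calc_user_sim and recommend come from the code above.

# A minimal usage sketch; file name and user id are assumptions, not part of the original code.
if __name__ == '__main__':
    usercf = UserBasedCF()
    # pivot=0.7 keeps roughly 70% of the ratings for training and 30% for testing
    usercf.generate_dataset('ratings.csv', pivot=0.7)
    usercf.calc_user_sim()               # fills the module-level user_sim_mat
    recommended = usercf.recommend('1')  # top-N imdbIds for user '1'
    print('recommendations for user 1:', recommended)

Note that recommend() assumes the given user id appears in the training set and has at least one neighbour in user_sim_mat; otherwise it raises a KeyError.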
