class07 Spark RDD Programming Lab

7.2 RDD Operation Methods

1. map(func): mapping transformation

 

 
val rdd1 = sc.parallelize(List(1,2,3,4))
val result = rdd1.map(x => x + 2)
rdd1: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[0] at parallelize at <console>:25
result: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[1] at map at <console>:26

 

 
// collect the results into a local array
result.collect()
res1: Array[Int] = Array(3, 4, 5, 6)

 

 
// use map() to square each element
val rdd1 = sc.parallelize(List(1,2,3,4))
val result = rdd1.map(x => x * x)
println(result.collect().mkString(";"))
1;4;9;16
rdd1: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[2] at parallelize at <console>:29
result: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[3] at map at <console>:30
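
map() is typically combined with actions. As a quick sketch (not shown in the lab run above, reusing rdd1 defined just before it), the mean of the elements can be obtained with map(), reduce(), and count():

// sketch: compute the mean of rdd1 (1,2,3,4) with map(), reduce() and count()
val total = rdd1.map(_.toDouble).reduce(_ + _)   // expected: 10.0
val mean = total / rdd1.count()                  // expected: 2.5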

 

 
// build a key-value pair RDD keyed by the first word of each line
val wordsRDD = sc.parallelize(List("happy everyday","hello world","how are you"))
val PairRDD = wordsRDD.map(x => (x.split(" ")(0) , x))
PairRDD.collect()
wordsRDD: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[4] at parallelize at <console>:26
PairRDD: org.apache.spark.rdd.RDD[(String, String)] = MapPartitionsRDD[5] at map at <console>:27
res4: Array[(String, String)] = Array((happy,happy everyday), (hello,hello world), (how,how are you))

 

 
2. filter(func): filter transformation

 

 
val rdd2 = sc.parallelize(List(1,2,3,4,5,6,7,8,9))
rdd2.filter(x => x > 4).collect()
rdd2: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[6] at parallelize at <console>:25
res9: Array[Int] = Array(5, 6, 7, 8, 9)

 

 
// student records: name, course, score
val students = sc.parallelize(List("daiblo java 100","hello scala 88","White python 89"))
// parse each record into a (name, course, score) triple
val studentsTup = students.map{x => val splits = x.split(" "); (splits(0),splits(1),splits(2).toInt)}
studentsTup.collect()
// keep the students whose score is 100
studentsTup.filter(_._3 == 100).map{x => (x._1 , x._2)}.collect().foreach(println)
(daiblo,java)
students: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[11] at parallelize at <console>:29
studentsTup: org.apache.spark.rdd.RDD[(String, String, Int)] = MapPartitionsRDD[12] at map at <console>:31

3. flatMap(func): flat-map transformation

 

 
val rdd3 = sc.parallelize(List(1,2,3,4,5,6))
val rdd31 = rdd3.map(_ * 2)
rdd31.collect()
rdd3: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[19] at parallelize at <console>:28
rdd31: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[20] at map at <console>:29
res18: Array[Int] = Array(2, 4, 6, 8, 10, 12)

 

 
val rdd32 = rdd31.filter(x => x > 5).flatMap(x => x to 9)
rdd32.collect()
rdd32: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[22] at flatMap at <console>:28
res19: Array[Int] = Array(6, 7, 8, 9, 8, 9)

 

 
// split each line into words
def tokenize(ws:String) = {ws.split(" ").toList}
val lines = sc.parallelize(List("coffee panda","happy panda","panda party"))
lines.map(tokenize).collect().foreach(println)
lines.flatMap(tokenize).collect().foreach(println)
List(coffee, panda)
List(happy, panda)
List(panda, party)
coffee
panda
happy
panda
panda
party
tokenize: (ws: String)List[String]
lines: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[31] at parallelize at <console>:30

4. distinct([numPartitions]): deduplication transformation

 

 
val rdd4 = sc.parallelize(List(1,2,3,4,4,2,8,8,4,6))
val distinctRdd = rdd4.distinct()
distinctRdd.collect()
rdd4: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[34] at parallelize at <console>:25
distinctRdd: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[37] at distinct at <console>:26
res30: Array[Int] = Array(4, 6, 8, 2, 1, 3)
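
The optional numPartitions argument in the heading sets the number of partitions of the deduplicated result. A minimal sketch reusing rdd4 (element order may differ from the run above):

// sketch: distinct with an explicit partition count
val distinctRdd2 = rdd4.distinct(2)
distinctRdd2.getNumPartitions   // expected: 2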

5. union(otherDataset): union transformation

val rdd51 = sc.parallelize(List(1,3,4,5))
val rdd52 = sc.parallelize(List(2,3,4,7,8,9))
val result = rdd51.union(rdd52)
result.collect()
rdd51: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[38] at parallelize at <console>:27
rdd52: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[39] at parallelize at <console>:28
result: org.apache.spark.rdd.RDD[Int] = UnionRDD[40] at union at <console>:29
res31: Array[Int] = Array(1, 3, 4, 5, 2, 3, 4, 7, 8, 9)

6. intersection(otherRDD): intersection (deduplicated) transformation

 

 
val rdd61 = sc.parallelize(List(1,3,4,5))
val rdd62 = sc.parallelize(List(2,3,4,6,5,8,9))
val result = rdd61.intersection(rdd62)
result.collect()
rdd61: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[55] at parallelize at <console>:29
rdd62: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[56] at parallelize at <console>:30
result: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[62] at intersection at <console>:31
res37: Array[Int] = Array(4, 3, 5)

7. subtract(otherRDD): set-difference transformation

 

 
val rdd71 = sc.parallelize(List(1,3,4,5))
val rdd72 = sc.parallelize(1 to 5).subtract(rdd71)
println(rdd72.collect().toBuffer)
ArrayBuffer(2)
rdd71: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[49] at parallelize at <console>:25
rdd72: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[54] at subtract at <console>:26

8. cartesian(): Cartesian-product transformation

 

 
val rdd81 = sc.parallelize(List(1,3,4,5))
val rdd82 = sc.parallelize(List(4,6,5))
val result = rdd81.cartesian(rdd82)
result.collect()
rdd81: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[63] at parallelize at <console>:27
rdd82: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[64] at parallelize at <console>:28
result: org.apache.spark.rdd.RDD[(Int, Int)] = CartesianRDD[65] at cartesian at <console>:29
res38: Array[(Int, Int)] = Array((1,4), (3,4), (1,6), (1,5), (3,6), (3,5), (4,4), (5,4), (4,6), (4,5), (5,6), (5,5))

9. mapValues() transformation

 

 
val rdd91 = sc.parallelize(1 to 9 , 3)
rdd91.collect()
val result = rdd91.map(item => (item % 4 , item)).mapValues(v => v + 10)
println(result.collect().toBuffer)
ArrayBuffer((1,11), (2,12), (3,13), (0,14), (1,15), (2,16), (3,17), (0,18), (1,19))
rdd91: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[66] at parallelize at <console>:27
result: org.apache.spark.rdd.RDD[(Int, Int)] = MapPartitionsRDD[68] at mapValues at <console>:29

10. groupByKey(): grouping transformation

 

 
val rdd101 = sc.parallelize(1 to 9 , 3)
val rddMap = rdd101.map(item => (item % 3 , item))
val rdd102 = rddMap.groupByKey()
rdd102.collect()
rdd101: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[69] at parallelize at <console>:25
rddMap: org.apache.spark.rdd.RDD[(Int, Int)] = MapPartitionsRDD[70] at map at <console>:26
rdd102: org.apache.spark.rdd.RDD[(Int, Iterable[Int])] = ShuffledRDD[71] at groupByKey at <console>:27
res40: Array[(Int, Iterable[Int])] = Array((0,CompactBuffer(3, 6, 9)), (1,CompactBuffer(1, 4, 7)), (2,CompactBuffer(2, 5, 8)))
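
The grouped values are Iterables, so they usually need a follow-up aggregation. A quick sketch (not in the lab run above) that sums each group with mapValues(); for a plain sum, reduceByKey() in the next item is normally preferred because it combines values before the shuffle:

// sketch: sum the values of each group produced by groupByKey()
rdd102.mapValues(_.sum).collect()
// expected totals: (0,18), (1,12), (2,15); key order may vary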

11. reduceByKey(func, [numPartitions]): grouped aggregation transformation

 

 
val rddMap = sc.parallelize(1 to 12 , 4).map(item => (item % 4,item))
rddMap.collect()
rddMap: org.apache.spark.rdd.RDD[(Int, Int)] = MapPartitionsRDD[81] at map at <console>:27
res43: Array[(Int, Int)] = Array((1,1), (2,2), (3,3), (0,4), (1,5), (2,6), (3,7), (0,8), (1,9), (2,10), (3,11), (0,12))

 

 
val rdd111 = rddMap.reduceByKey((x,y) => x + y)
rdd111.collect()
rdd111: org.apache.spark.rdd.RDD[(Int, Int)] = ShuffledRDD[84] at reduceByKey at <console>:28
res45: Array[(Int, Int)] = Array((0,24), (1,15), (2,18), (3,21))

 

rddMap.reduceByKey((x,y) => x * y).collect()
res46: Array[(Int, Int)] = Array((0,384), (1,45), (2,120), (3,231))

12. combineByKey(): partition-wise aggregation transformation

 

 
val rdd121 = sc.parallelize(1 to 9 , 3)
val rdd122 = rdd121.map(item => (item % 3 , item)).mapValues(v => v.toDouble).combineByKey((v:Double) => (v,1),
                                                                                           (c: (Double,Int),v: Double) => (c._1 + v,c._2 + 1),
                                                                                           (c1: (Double,Int),c2: (Double,Int)) => (c1._1 + c2._1, c1._2 + c2._2))
rdd122.collect()
rdd121: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[86] at parallelize at <console>:25
rdd122: org.apache.spark.rdd.RDD[(Int, (Double, Int))] = ShuffledRDD[89] at combineByKey at <console>:26
res47: Array[(Int, (Double, Int))] = Array((0,(18.0,3)), (1,(12.0,3)), (2,(15.0,3)))
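
The (sum, count) pairs built above are the usual stepping stone to a per-key average. A minimal follow-up sketch (not in the lab run above):

// sketch: derive the per-key average from the (sum, count) pairs
val avgByKey = rdd122.mapValues { case (sum, cnt) => sum / cnt }
avgByKey.collect()
// expected: (0,6.0), (1,4.0), (2,5.0); key order may vary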

13. sortByKey(ascending, [numPartitions]) transformation

 

 
val rdd13 = sc.parallelize(List(("A",1),("B",3),("C",2),("D",5)))
val rdd131 = sc.parallelize(List(("B",1),("A",3),("C",2),("D",5)))
val rdd132 = rdd13.union(rdd131)
// aggregate by key
val rdd133 = rdd132.reduceByKey(_ + _)
val rdd134 = rdd132.sortByKey(false)
rdd134.collect()
rdd13: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[90] at parallelize at <console>:25
rdd131: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[91] at parallelize at <console>:26
rdd132: org.apache.spark.rdd.RDD[(String, Int)] = UnionRDD[92] at union at <console>:27
rdd133: org.apache.spark.rdd.RDD[(String, Int)] = ShuffledRDD[93] at reduceByKey at <console>:29
rdd134: org.apache.spark.rdd.RDD[(String, Int)] = ShuffledRDD[96] at sortByKey at <console>:30
res48: Array[(String, Int)] = Array((D,5), (D,5), (C,2), (C,2), (B,3), (B,1), (A,1), (A,3))
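
rdd133 (the reduceByKey result) is defined above but never sorted; as a quick sketch (not in the lab run above), the aggregated pairs can be sorted by key in ascending order:

// sketch: sort the aggregated RDD by key, ascending
rdd133.sortByKey(true).collect()
// expected: (A,4), (B,4), (C,4), (D,10)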

14. sortBy(f: (T) => K, [ascending: Boolean = true], [numPartitions]) transformation

 

 
val goods = sc.parallelize(List("radio 23 30","soap 233 10","cup 12 20","bowl 35 8"))
goods: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[97] at parallelize at <console>:25

 

 
// sort by the first tuple element (the product name)
val goodsTup = goods.map{x => val splits = x.split(" "); (splits(0),splits(1).toDouble,splits(2).toInt)}
goodsTup.sortBy(_._1).collect().foreach(println)
(bowl,35.0,8)
(cup,12.0,20)
(radio,23.0,30)
(soap,233.0,10)
goodsTup: org.apache.spark.rdd.RDD[(String, Double, Int)] = MapPartitionsRDD[98] at map at <console>:26

 

 
// sort by value: the unit price, in descending order
goodsTup.sortBy(x => x._2 , false).collect().foreach(println)
(soap,233.0,10)
(bowl,35.0,8)
(radio,23.0,30)
(cup,12.0,20)

 

 
// sort by the quantity
goodsTup.sortBy(_._3).collect().foreach(println)
(bowl,35.0,8)
(soap,233.0,10)
(cup,12.0,20)
(radio,23.0,30)

 

 
// sort by the quantity modulo 7
goodsTup.sortBy(x => x._3 % 7).collect().foreach(println)
(bowl,35.0,8)
(radio,23.0,30)
(soap,233.0,10)
(cup,12.0,20)

 

 
// sort by a composite tuple key: price descending, then quantity descending
goodsTup.sortBy(x => (-x._2 , -x._3)).collect().foreach(println)
(soap,233.0,10)
(bowl,35.0,8)
(radio,23.0,30)
(cup,12.0,20)

15. sample(withReplacement, fraction, seed) transformation

 

 
val rdd15 = sc.parallelize(1 to 1000)
rdd15.sample(false,0.01,1).collect().foreach(x => print(x + " "))
110 137 196 231 283 456 483 513 605 618 634 784 
rdd15: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[126] at parallelize at <console>:27
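
For comparison, a sampling sketch with replacement (not in the lab run above; the elements drawn depend on the seed, so no output is listed):

// sketch: sample roughly 1% of rdd15 with replacement
rdd15.sample(true, 0.01, 42).collect()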

16. join(otherDataset, [numPartitions]) transformation

 

 
val rdd161 = sc.parallelize(List( ("scala" , 2) , ("java" , 3), ("python" ,4) ,("scala" , 8)))
val rdd162 = sc.parallelize(List( ("scala" , 2) , ("java" , 32), ("python" ,4) ,("hbase" , 8)))
val rdd163 = rdd161.join(rdd162)
rdd163.collect()
rdd161: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[128] at parallelize at <console>:25
rdd162: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[129] at parallelize at <console>:26
rdd163: org.apache.spark.rdd.RDD[(String, (Int, Int))] = MapPartitionsRDD[132] at join at <console>:27
res57: Array[(String, (Int, Int))] = Array((scala,(2,2)), (scala,(8,2)), (python,(4,4)), (java,(3,32)))

 

 
val left_join = rdd161.leftOuterJoin(rdd162)
left_join.collect()
left_join: org.apache.spark.rdd.RDD[(String, (Int, Option[Int]))] = MapPartitionsRDD[138] at leftOuterJoin at <console>:29
res59: Array[(String, (Int, Option[Int]))] = Array((scala,(2,Some(2))), (scala,(8,Some(2))), (python,(4,Some(4))), (java,(3,Some(32))))

 

 
val full_join = rdd161.fullOuterJoin(rdd162)
full_join.collect()
full_join: org.apache.spark.rdd.RDD[(String, (Option[Int], Option[Int]))] = MapPartitionsRDD[141] at fullOuterJoin at <console>:27
res60: Array[(String, (Option[Int], Option[Int]))] = Array((scala,(Some(2),Some(2))), (scala,(Some(8),Some(2))), (python,(Some(4),Some(4))), (java,(Some(3),Some(32))), (hbase,(None,Some(8))))
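
For symmetry with leftOuterJoin, a rightOuterJoin sketch (not in the lab run above); it keeps every key of rdd162 and wraps the possibly missing left-side value in an Option:

// sketch: rightOuterJoin keeps all keys from rdd162
val right_join = rdd161.rightOuterJoin(rdd162)
right_join.collect()
// expected to include (hbase,(None,8)), since "hbase" appears only in rdd162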

17. zip() transformation

 

 
val rdd171 = sc.parallelize(Array(1,2,3),3)
val rdd172 = sc.parallelize(Array("a","b","c"),3)
val ziprdd = rdd171.zip(rdd172)
ziprdd.collect()
rdd171: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[142] at parallelize at <console>:25
rdd172: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[143] at parallelize at <console>:26
ziprdd: org.apache.spark.rdd.RDD[(Int, String)] = ZippedPartitionsRDD2[144] at zip at <console>:27
res61: Array[(Int, String)] = Array((1,a), (2,b), (3,c))

18. keys and values transformations

 

 
ziprdd.keys.collect
res63: Array[Int] = Array(1, 2, 3)

 

ziprdd.values.collect
res64: Array[String] = Array(a, b, c)

19. coalesce(numPartitions: Int): repartitioning transformation

 

 
val rdd = sc.parallelize(1 to 16 ,4)
rdd.partitions.size
rdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[149] at parallelize at <console>:27
res66: Int = 4

 

 
val coalrdd = rdd.coalesce(5)
coalrdd.partitions.size
coalrdd: org.apache.spark.rdd.RDD[Int] = CoalescedRDD[150] at coalesce at <console>:26
res67: Int = 4

 

 
val coalrdd1 = rdd.coalesce(5,true)
coalrdd1.partitions.size
coalrdd1: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[154] at coalesce at <console>:26
res69: Int = 5
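
Note why coalesce(5) above still reports 4 partitions: without a shuffle, coalesce can only merge existing partitions, never split them, so the count cannot grow; passing shuffle = true (the second call) removes that restriction. A shrinking sketch for completeness (not in the lab run above):

// sketch: reducing the partition count needs no shuffle
rdd.coalesce(2).getNumPartitions   // expected: 2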

20. repartition(numPartitions: Int): repartitioning transformation

 

 
val rdd = sc.parallelize(1 to 16 ,8)
rdd.partitions.size
rdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[155] at parallelize at <console>:27
res70: Int = 8

 

 
val rerdd = rdd.repartition(2)
rerdd.partitions.size
rerdd: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[163] at repartition at <console>:28
res73: Int = 2

 

 
rerdd.getNumPartitions
res74: Int = 2
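
repartition(n) is essentially coalesce(n, shuffle = true), so unlike shuffle-free coalesce it can increase as well as decrease the partition count. A minimal sketch (not in the lab run above), reusing the 8-partition rdd defined earlier in this item:

// sketch: repartition can grow the partition count as well
rdd.repartition(12).getNumPartitions   // expected: 12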

 

7.2.2 Action Operations

 

 
// 1.collect()
val rdd1 = sc.makeRDD(List(1,2,3,4,5,6,2,5,1))
rdd1.collect()
rdd1: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[165] at makeRDD at <console>:28
res78: Array[Int] = Array(1, 2, 3, 4, 5, 6, 2, 5, 1)

 

 
//2.count()
println(rdd1.count())
9

 

 
//3.countByValue()
rdd1.countByValue()
res79: scala.collection.Map[Int,Long] = Map(5 -> 2, 1 -> 2, 6 -> 1, 2 -> 2, 3 -> 1, 4 -> 1)

 

 
//4.countByKey()
val rdd = sc.makeRDD(List( ("scala" , 2) , ("java" , 32), ("python" ,4) ,("hbase" , 8)))
rdd.countByKey()
rdd: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[169] at makeRDD at <console>:27
res80: scala.collection.Map[String,Long] = Map(scala -> 1, python -> 1, java -> 1, hbase -> 1)

 

 
// 5.first()
val rdd = sc.makeRDD(List("scala", "java" ,"python","hbase"))
rdd.first()
rdd: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[172] at makeRDD at <console>:28
res81: String = scala

 
