SReadParallel.scala
package examples
import edb.client.DBClient
import edb.common.{ExistingTableException, Row, Schema, UnknownTableException}
import edb.server.DBServer
import examples.utils.RDDUtils
import org.apache.spark.sql.SparkSession
import java.io.IOException
import scala.collection.JavaConverters._
object SReadParallel {
@throws[IOException]
@throws[InterruptedException]
@throws[ExistingTableException]
@throws[UnknownTableException]
def main(args: Array[String]): Unit = {
val serverHost = "localhost"
val serverPort = 50199
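// Start an in-process ExampleDB server for this example to read from.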
val server = new DBServer(serverPort)
server.start()
System.out.println("*** Example database server started")
//
// Since this DataSource doesn't support writing, we need to populate
// ExampleDB with some data.
//
val schema = new Schema
schema.addColumn("u", Schema.ColumnType.INT64)
schema.addColumn("v", Schema.ColumnType.DOUBLE)
val client = new DBClient(serverHost, serverPort)
client.createTable("myTable", schema)
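// Build 20 rows of sample data: column "u" holds i * 100 and column "v" holds i + 0.2.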
val toInsert = for {
i <- (0 to 19).toList
} yield {
val r = new Row
r.addField(new Row.Int64Field("u", i * 100))
r.addField(new Row.DoubleField("v", i + 0.2))
r
}
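// Insert all of the rows in a single bulk call.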
client.bulkInsert("myTable", toInsert.asJava)
System.out.println("*** Example database server populated with data")
// By default this data source creates Datasets with four partitions,
// ExampleDB's default number of table partitions.
val dataSourceName = "datasources.ParallelRowDataSource"
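// Run Spark locally with four threads so the partitions can actually be read in parallel.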
val spark = SparkSession.builder
.appName("SReadParallel")
.master("local[4]")
.getOrCreate
//
// This is where we read from our DataSource. Notice how we use the
// fully qualified class name and provide the information needed to connect to
// ExampleDB using options. This time we'll use ExampleDB's default number of table
// partitions, 4, so we don't need to specify it.
//
var data = spark.read
.format(dataSourceName)
.option("host", serverHost)
.option("port", serverPort)
.option("table", "myTable").load
System.out.println("*** Schema: ")
data.printSchema()
System.out.println("*** Data: ")
data.show()
//
// Since this DataSource supports parallel reads, the resulting Dataset
// has multiple partitions (four here, ExampleDB's default).
//
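// RDDUtils.analyze (a utility from these examples) shows how the rows are
// spread across the Dataset's partitions.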
RDDUtils.analyze(data)
//
// We can specify a different number of partitions too, overriding ExampleDB's default.
//
data = spark.read.format(dataSourceName)
.option("host", serverHost)
.option("port", serverPort)
.option("table", "myTable")
.option("partitions", 6).load // number of partitions specified here
System.out.println("*** Schema: ")
data.printSchema()
System.out.println("*** Data: ")
data.show()
//
// This time we see six partitions.
//
RDDUtils.analyze(data)
spark.stop()
server.stop()
}
}