|
| 1 | +// Copyright (c) 2013-2020 Rob Norris and Contributors |
| 2 | +// This software is licensed under the MIT License (MIT). |
| 3 | +// For more information see LICENSE or https://opensource.org/licenses/MIT |
| 4 | + |
| 5 | +package doobie.bench |
| 6 | + |
| 7 | +import cats.effect.IO |
| 8 | +import com.zaxxer.hikari.{HikariConfig, HikariDataSource} |
| 9 | +import doobie.* |
| 10 | +import doobie.syntax.all.* |
| 11 | +import org.openjdk.jmh.annotations.* |
| 12 | +import org.openjdk.jmh.infra.Blackhole |
| 13 | +import scala.util.Using |
| 14 | + |
@State(Scope.Benchmark)
@OperationsPerInvocation(10000) // We process 10k rows so adjust the benchmark output accordingly
class LargeRow {
  import cats.effect.unsafe.implicits.global

  // Pool of 2 connections against a local PostgreSQL "world" database.
  private val hikariConfig = {
    val config = new HikariConfig()
    config.setDriverClassName("org.postgresql.Driver")
    config.setJdbcUrl("jdbc:postgresql:world")
    config.setUsername("postgres")
    config.setPassword("password")
    config.setMaximumPoolSize(2)
    config
  }

  val pool = new HikariDataSource(hikariConfig)

  // Transactor over the pool. `cleanup` is the finalizer of the allocated
  // fixed thread pool (used for awaiting connections) and must run at teardown.
  val (xa, cleanup) = {
    (for {
      connectEC <- ExecutionContexts.fixedThreadPool[IO](hikariConfig.getMaximumPoolSize)
    } yield Transactor.fromDataSource[IO].apply[HikariDataSource](pool, connectEC)).allocated.unsafeRunSync()
  }

  /** Drops and recreates the `data` table, filling it with 10k rows of
    * pseudo-random values made deterministic via `setseed(0.5)`.
    */
  @Setup(Level.Trial)
  def setup(): Unit = {
    val connio = for {
      _ <- sql"""DROP TABLE IF EXISTS data""".update.run
      _ <- sql"""CREATE TABLE data (
            col1 DOUBLE PRECISION,
            col2 VARCHAR(50),
            col3 INTEGER,
            col4 VARCHAR(50),
            col5 DOUBLE PRECISION,
            col6 DOUBLE PRECISION,
            col7 VARCHAR(50),
            col8 VARCHAR(50)
        );""".update.run
      _ <- sql"select setseed(0.5)".query[Unit].unique // deterministic seed
      _ <- sql"""INSERT INTO data (col1, col2, col3, col4, col5, col6, col7, col8)
            SELECT random(), random() :: text, (random() * 1000) :: int, random() :: text, random(), random(), random() :: text, random() :: text
            FROM generate_series(1, 10000)
            """.update.run
    } yield ()

    connio.transact(xa).unsafeRunSync()
  }

  /** Closes the connection pool, then releases the transactor's thread pool. */
  @TearDown(Level.Trial)
  def teardown(): Unit = {
    pool.close()
    cleanup.unsafeRunSync()
  }

  // Baseline: decode each row as a flat 8-tuple.
  @Benchmark
  def tuple(bh: Blackhole): Unit = {
    bh.consume(sql"""SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data"""
      .query[(Double, String, Int, String, Double, Double, String, String)].to[List].transact(xa).unsafeRunSync())
  }

  // Same as `tuple` but with the whole row wrapped in Option (null-tracking overhead).
  @Benchmark
  def tupleOpt(bh: Blackhole): Unit = {
    bh.consume(sql"""SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data"""
      .query[Option[(Double, String, Int, String, Double, Double, String, String)]].to[List].transact(xa).unsafeRunSync())
  }

  // Nested case class decoded via the explicit (semi-auto) Read instances below.
  @Benchmark
  def semiautoDerivedComplex(bh: Blackhole): Unit = {
    import SemiautoDerivedInstances.*
    bh.consume(sql"""SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data"""
      .query[Complex].to[List].transact(xa).unsafeRunSync())
  }

  @Benchmark
  def semiautoDerivedComplexOpt(bh: Blackhole): Unit = {
    import SemiautoDerivedInstances.*
    bh.consume(sql"""SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data"""
      .query[Option[Complex]].to[List].transact(xa).unsafeRunSync())
  }

  // Nested case class decoded via fully automatic derivation.
  @Benchmark
  def autoDerivedComplex(bh: Blackhole): Unit = {
    import doobie.implicits.*
    bh.consume(sql"""SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data"""
      .query[Complex].to[List].transact(xa).unsafeRunSync())
  }

  // Hand-written JDBC baseline performing the same query and mapping.
  @Benchmark
  def rawJdbcComplex(bh: Blackhole): Unit = {
    val rows = Using.resource(pool.getConnection()) { c =>
      Using.resource(c.prepareStatement("SELECT col1, col2, col3, col4, col5, col6, col7, col8 FROM data")) { ps =>
        Using.resource(ps.executeQuery()) { rs =>
          val buf = scala.collection.mutable.ArrayBuffer.empty[Complex]
          while (rs.next()) {
            buf += Complex(
              DSIS(
                DS(rs.getDouble(1), rs.getString(2)),
                IS(rs.getInt(3), rs.getString(4))
              ),
              DDSS(
                DD(rs.getDouble(5), rs.getDouble(6)),
                // Fixed: previously read columns 6 and 7, re-reading the DOUBLE
                // col6 as a String and never reading col8. Columns 7 and 8 match
                // the doobie-based benchmarks above, keeping the comparison fair.
                SS(rs.getString(7), rs.getString(8))
              )
            )
          }
          buf.toList
        }
      }
    }
    bh.consume(rows)
  }
}
| 140 | + |
// Nested row shapes used by the benchmarks. `Complex` mirrors the eight
// selected columns (double, string, int, string, double, double, string, string),
// split across two levels of nesting to exercise nested Read derivation.
// Marked `final`: case classes should not be extended.
final case class IS(i: Int, s: String)
final case class DS(d: Double, s: String)
final case class DSIS(ds: DS, is: IS)
final case class DD(d0: Double, d1: Double)
final case class SS(s0: String, s1: String)
final case class DDSS(dd: DD, ss: SS)
final case class Complex(dsis: DSIS, ddss: DDSS)
| 148 | + |
// Explicit ("semi-auto") Read instances for every level of the Complex
// hierarchy, used by the `semiautoDerived*` benchmarks for comparison
// against the fully automatic derivation in `autoDerivedComplex`.
// NOTE(review): declaration order appears deliberate — vals initialize in
// order, and each derived instance uses the instances of its field types;
// confirm before reordering.
object SemiautoDerivedInstances {
  implicit val isRead: Read[IS] = Read.derived
  implicit val dsRead: Read[DS] = Read.derived
  implicit val dsisRead: Read[DSIS] = Read.derived
  implicit val ddRead: Read[DD] = Read.derived
  implicit val ssRead: Read[SS] = Read.derived
  implicit val ddssRead: Read[DDSS] = Read.derived
  implicit val cRead: Read[Complex] = Read.derived
}
0 commit comments