[ad_1]
My `doNothing` stub still ends up invoking the original method. The code below is just boilerplate to write a DataFrame to a GCP bucket.
test("Test 3 : writeAuditTable") {
  // Mock both the DataFrame and its writer so no real Spark write is executed.
  val mockWriter = mock[DataFrameWriter[Row]]
  val mockDf = mock[DataFrame]
  // Every builder call on the writer must return the mock itself so the
  // fluent chain in writeAuditTable stays inside the mock.
  when(mockDf.coalesce(1)) thenReturn mockDf
  when(mockDf.write) thenReturn mockWriter
  when(mockWriter.format("com.google.cloud.spark.bigquery")) thenReturn mockWriter
  when(mockWriter.option("table", "tableName")) thenReturn mockWriter
  when(mockWriter.option("parentProject", config.BQ_AUDIT_TABLE_PARENT_PROJECT)) thenReturn mockWriter
  when(mockWriter.option("credentialsFile", auth_json)) thenReturn mockWriter
  when(mockWriter.option("temporaryGcsBucket", config.BQ_AUDIT_TABLE_TEMP_BUCKET)) thenReturn mockWriter
  when(mockWriter.mode(SaveMode.Append)) thenReturn mockWriter
  doNothing().when(mockWriter).save()
  // BUG FIX: the original test called writeAuditTable with the real `df`,
  // not `mockDf`, so the real DataFrameWriter.save() ran and blew up with
  // the NullPointerException in Utils.redact seen in the stack trace.
  // Passing the mocked DataFrame keeps the whole chain on the stubs.
  // (Also dropped the stray type ascriptions `df: DataFrame` / `"tableName": String`.)
  writeAuditTable(mockDf, "tableName", config, auth_json)
  verify(mockWriter).save()
}
My method under test:
/** Appends `df` to a BigQuery audit table using the Spark BigQuery connector.
  *
  * @param df        the DataFrame to persist
  * @param tableName fully-qualified target BigQuery table
  * @param config    properties holding the parent project and temp GCS bucket
  * @param auth_json path to the service-account credentials file
  */
def writeAuditTable(df: DataFrame, tableName: String, config: ReadProperties, auth_json: String): Unit = {
  // Configure the BigQuery connector; the temporary GCS bucket is required
  // for the indirect write path used by the connector.
  val bqWriter = df.write
    .format("com.google.cloud.spark.bigquery")
    .option("table", tableName)
    .option("parentProject", config.BQ_AUDIT_TABLE_PARENT_PROJECT)
    .option("credentialsFile", auth_json)
    .option("temporaryGcsBucket", config.BQ_AUDIT_TABLE_TEMP_BUCKET)
    .mode(SaveMode.Append)
  bqWriter.save()
}
This uses ScalaTest with MockitoSugar (mockito-scala).
While running the test, I get the following:
java.lang.NullPointerException was thrown. java.lang.NullPointerException at
java.util.regex.Matcher.getTextLength(Matcher.java:1283) at
java.util.regex.Matcher.reset(Matcher.java:309) at
java.util.regex.Matcher.(Matcher.java:229) at
java.util.regex.Pattern.matcher(Pattern.java:1093) at
scala.util.matching.Regex.findFirstIn(Regex.scala:423) at
org.apache.spark.util.Utils$.$anonfun$redact$2(Utils.scala:2643) at
scala.Option.orElse(Option.scala:447) at
org.apache.spark.util.Utils$.$anonfun$redact$1(Utils.scala:2643) at
scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:285)
at scala.collection.Iterator.foreach(Iterator.scala:943) at
scala.collection.Iterator.foreach$(Iterator.scala:943) at
scala.collection.AbstractIterator.foreach(Iterator.scala:1431) at
scala.collection.IterableLike.foreach(IterableLike.scala:74) at
scala.collection.IterableLike.foreach$(IterableLike.scala:73) at
scala.collection.AbstractIterable.foreach(Iterable.scala:56) at
scala.collection.TraversableLike.map(TraversableLike.scala:285) at
scala.collection.TraversableLike.map$(TraversableLike.scala:278) at
scala.collection.AbstractTraversable.map(Traversable.scala:108) at
org.apache.spark.util.Utils$.redact(Utils.scala:2641) at
org.apache.spark.util.Utils$.redact(Utils.scala:2608) at
org.apache.spark.sql.internal.SQLConf.$anonfun$redactOptions$1(SQLConf.scala:2083)
at
scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
at
scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
at scala.collection.immutable.List.foldLeft(List.scala:91) at
org.apache.spark.sql.internal.SQLConf.redactOptions(SQLConf.scala:2083)
at
org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.simpleString(SaveIntoDataSourceCommand.scala:52)
at
org.apache.spark.sql.catalyst.plans.QueryPlan.verboseString(QueryPlan.scala:177)
at
org.apache.spark.sql.catalyst.trees.TreeNode.generateTreeString(TreeNode.scala:548)
at
org.apache.spark.sql.catalyst.trees.TreeNode.treeString(TreeNode.scala:472)
at
org.apache.spark.sql.execution.QueryExecution.$anonfun$toString$3(QueryExecution.scala:197)
at
org.apache.spark.sql.execution.QueryExecution.stringOrError(QueryExecution.scala:99)
at
org.apache.spark.sql.execution.QueryExecution.toString(QueryExecution.scala:197)
at
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:75)
at
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
at
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
at
org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:676)
at
org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:290)
at
org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:271)
at
com.walmart.grcaml.bigQuery.BQUtils$.writeAuditTable(BQUtils.scala:45)
at
com.walmart.grcaml.bigQuery.BQUtilsTest.$anonfun$new$3(BQUtilsTest.scala:64)
[ad_2]