If you have enabled Iceberg support (see Enable Iceberg), the required Iceberg configurations are automatically set up in spark-defaults.conf. You can start Spark normally without additional configuration.
Navigate to the ${SPARK_HOME}/bin folder and export the JWT token:
# Create a DataFrame of (id, creation_date, last_update_time) rows.
data = spark.createDataFrame(
    [
        ("100", "2015-01-01", "2015-01-01T13:51:39.340396Z"),
        ("101", "2015-01-01", "2015-01-01T12:14:58.597216Z"),
        ("102", "2015-01-01", "2015-01-01T13:51:40.417052Z"),
        ("103", "2015-01-01", "2015-01-01T13:51:40.519832Z"),
    ],
    ["id", "creation_date", "last_update_time"],
)

# Write the DataFrame as an Iceberg dataset.
# The table is created first (if absent) so that writeTo().append() has a target.
spark.sql("""CREATE TABLE IF NOT EXISTS iceberg_catalog.db.table_name ( id string, creation_date string, last_update_time string) USING iceberg LOCATION 's3a://${S3_BUCKET}/${TABLE_PATH}'""")
data.writeTo("iceberg_catalog.db.table_name").append()

# Read the rows back from the Iceberg table and display them.
df = spark.read.format("iceberg").load("iceberg_catalog.db.table_name")
df.show()
// Create a DataFrame of (id, creation_date, last_update_time) rows.
// NOTE: fixed comment syntax — Scala uses "//", not "##".
val data = Seq(
  ("100", "2015-01-01", "2015-01-01T13:51:39.340396Z"),
  ("101", "2015-01-01", "2015-01-01T12:14:58.597216Z"),
  ("102", "2015-01-01", "2015-01-01T13:51:40.417052Z"),
  ("103", "2015-01-01", "2015-01-01T13:51:40.519832Z")
).toDF("id", "creation_date", "last_update_time")

// Write the DataFrame as an Iceberg dataset.
// The table is created first (if absent) so that writeTo().append() has a target.
spark.sql("""CREATE TABLE IF NOT EXISTS iceberg_catalog.db.table_name ( id string, creation_date string, last_update_time string) USING iceberg LOCATION 's3a://${S3_BUCKET}/${TABLE_PATH}'""")
data.writeTo("iceberg_catalog.db.table_name").append()

// Read the rows back from the Iceberg table and display them.
val df = spark.read.format("iceberg").load("iceberg_catalog.db.table_name")
df.show()