# Round-trip a Spark DataFrame through pandas (e.g. for pandas-only processing),
# then persist the result as a managed table.
# NOTE(review): the typographic quotes and "- " list bullets in the original
# were paste artifacts and made this a SyntaxError; fixed to plain ASCII.
ModelData2 = ModelData.toPandas()  # convert Spark DataFrame to a pandas DataFrame
table_model = spark.createDataFrame(ModelData2)  # convert back to a Spark DataFrame
# Persist as a table under the given database/schema.
# NOTE(review): fails if the table already exists — add .mode("overwrite") if
# re-runs should replace it; confirm desired semantics with the caller.
table_model.write.saveAsTable('LIBRARYPATH.model_data')
Alternatively, selecting a subset of columns first:
# Select a subset of columns from a pandas DataFrame, convert it to a Spark
# DataFrame, and persist it as a table.
# NOTE(review): the original mixed typographic quotes (‘ ’ ′) into the string
# literals — a paste artifact that made this a SyntaxError; fixed to ASCII quotes.
new_df = transformed_chrn2[['Var1', 'Var2', 'Var3', 'Var4', 'Var5']]
table_df = spark.createDataFrame(new_df)  # pandas -> Spark DataFrame
# Persist as 'table_name' in database 'directory_name'.
# NOTE(review): errors if the table exists — use .mode("overwrite") for re-runs.
table_df.write.saveAsTable('directory_name.table_name')
Sources:
https://stackoverflow.com/questions/30664008/how-to-save-dataframe-directly-to-hive
https://docs.microsoft.com/en-us/azure/hdinsight/spark/apache-spark-connect-to-sql-database
https://docs.microsoft.com/en-us/azure/databricks/getting-started/spark/dataframes