@article {bnh-8368, title = {Up-scaling fuel hazard metrics derived from terrestrial laser scanning using a machine learning model}, journal = {Remote Sensing}, volume = {15}, year = {2023}, month = {02/2023}, pages = {1273}, abstract = {

undefined

}, keywords = {ALS, canopy, cover, elevated, field data, fuel hazard, fuel layers, fuel metrics, height, near-surface, random forest, up-scaling, visual assessments}, doi = {https://doi.org/10.3390/rs15051273}, url = {https://www.mdpi.com/2072-4292/15/5/1273}, author = {Ritu Taneja and Luke Wallace and Samuel Hillman and Karin Reinke and James Hilton and Simon Jones and Bryan Hally} } @article {bnh-8326, title = {Fuels3D - final project report}, number = {723}, year = {2022}, month = {03/2022}, institution = {Bushfire and Natural Hazards CRC}, address = {Melbourne}, abstract = {

Understanding fuel hazard is essential. Effective management of Australia{\textquoteright}s fire prone landscapes relies on accurate consistent and up-to-date fuel characterisation. This project seeks to create a quantitative methodology for calculating fuel hazard, in surface and near surface fuel layers, using affordable consumer grade equipment. It is hoped that this methodology will enhance and supplement existing visual estimation methods used by land management agencies across Australia and demonstrate the utility of moving towards new approaches capable of creating quantitative outputs. The \ method uses a series of systematically acquired photographs to create a 3D point cloud that captures vegetation elements in the surface and near surface vegetation layers and their horizontal and vertical structure. These point clouds are then processed to create the metrics for deriving fuel hazard estimates.

The project methodological tool-chain is divided into five major components:

Each of these methods are embedded in an AWS workflow. The aim being to provide firefighting and land management agencies with an end-to-end semi-automated methodology for collecting, analysing and visualising fuel hazard information.

Although a viable methodology was developed and implemented, results varied by ecosystem. Woodlands, plantations, low open forest, open grasslands and low open shrublands systems all had good image matching and end metric conversion rates ({\textgreater}90\%). In contrast, closed and other grasslands, shrublands and tall closed forest fuel types all had sample conversion rates below 65\%. The explanation of these large variances in success rates was explored with a number of image acquisition and processing factors identified.

There are many benefits to standardising data collection and harmonising metrics for reporting fuel hazard. Unlike visual assessments the reference photographs and associated point clouds exist in perpetuity and can be re-processed when new techniques emerge for their analysis. Comparing data gathered in different states, territories and jurisdictions also becomes much easier.

Feedback from end users was mixed. While many land managers felt this quantitative methodology had much merit others commented it was too time-consuming to replace current practices. Other (more costly) point cloud collection methods (Terrestrial Laser Scanners and Mobile Laser Scanners (LiDAR) as well as optical depth camera systems) have presented themselves as alternatives during the course of the project. With this in mind the research team has enabled the AWS tool chain to ingest other point cloud data into the fourth and fifth workflow elements. \ 

}, keywords = {fuel hazards, fuels3D, image-based, near-surface, point clouds, surface}, issn = {723}, author = {Simon Jones and Karin Reinke and Johann Tiede and Luke Wallace and Bryan Hally and Mark Robey} } @article {bnh-8371, title = {Terrestrial Laser Scanning: an operational tool for fuel hazard mapping?}, journal = {Fire}, volume = {5}, year = {2022}, month = {04/2022}, pages = {85}, abstract = {

Fuel hazard estimates are vital for the prediction of fire behaviour and planning fuel treatment activities. Previous literature has highlighted the potential of Terrestrial Laser Scanning (TLS) to be used to assess fuel properties. However, operational uptake of these systems has been limited due to a lack of a sampling approach that balances efficiency and data efficacy. This study aims to assess whether an operational approach utilising Terrestrial Laser Scanning (TLS) to capture fuel information over an area commensurate with current fuel hazard assessment protocols implemented in South-Eastern Australia is feasible. TLS data were captured over various plots in South-Eastern Australia, utilising both low- and high-cost TLS sensors. Results indicate that both scanners provided similar overall representation of the ground, vertical distribution of vegetation and fuel hazard estimates. The analysis of fuel information contained within individual scans clipped to 4 m showed similar results to that of the fully co-registered plot (cover estimates of near-surface vegetation were within 10\%, elevated vegetation within 15\%, and height estimates of near-surface and elevated strata within 0.05 cm). This study recommends that, to capture a plot in an operational environment (balancing efficiency and data completeness), a sufficient number of non-overlapping individual scans can provide reliable estimates of fuel information at the near-surface and elevated strata, without the need for co-registration in the case study environments. The use of TLS within the rigid structure provided by current fuel observation protocols provides incremental benefit to the measurement of fuel hazard. Future research should leverage the full capability of TLS data and combine it with moisture estimates to gain a full realisation of the fuel hazard.

}, keywords = {fuel hazard, fuel structure, occlusion, remote sensing, Risk assessment, TLS}, doi = {https://doi.org/10.3390/fire5040085}, url = {https://www.mdpi.com/2571-6255/5/4/85}, author = {Luke Wallace and Samuel Hillman and Bryan Hally and Ritu Taneja and Andrew White and James McGlade} } @article {bnh-8257, title = {A comparison between TLS and UAS LiDAR to represent eucalypt crown fuel characteristics}, journal = {ISPRS Journal of Photogrammetry and Remote Sensing}, volume = {181}, year = {2021}, month = {11/2021}, pages = {295-307}, abstract = {

Advances in fire behaviour modelling provide a catalyst for the development of next generation fuel inputs. Fire simulations underpin risk and consequence mapping and inform decisions regarding ecological and social impacts of different fire regimes. Unoccupied Aerial Systems (UAS) carrying Light Detection and Ranging (LiDAR) sensors have been proposed as a source of structural information with potential for describing fine fuel properties. Whilst these systems have been shown to be capable of describing general vegetation distribution, the ability to distinguish between vegetation elements that contribute to fire spread and those that do not (such as large woody elements) is yet to be explored. This study evaluates the ability of UAS LiDAR point clouds to provide a description of crown fuel elements in eucalypt trees. This is achieved through comparison with dense Terrestrial Laser Scanning (TLS) that were manually attributed with a fuel description. Using the TLSeparation package TLS and UAS LiDAR point clouds achieved 84.6\% and 81.1\% overall accuracy respectively in the separation of crown fuel and wood in nine reference trees. When applying the same separation process across a 30 by 50\ m plot consisting of approximately 75 trees, total canopy fuel volume was found to be strongly correlated between the TLS and UAS LiDAR point clouds (r: 0.96, RMSE: 1.53\ m3). A lower canopy base height and greater distance between crown fuel regions within each crown supported visual inspection of the point clouds that TLS point clouds were able to represent the crown to a greater extent than UAS LiDAR point clouds. Despite these differences it is likely that a less complete representation of canopy fuel such as that generated from UAS LiDAR point clouds will suitably represent the crown and canopy fuel objects effectively for fire behaviour modelling purposes. 
The research presented in this manuscript highlights the potential of TLS and UAS LiDAR point clouds to provide repeatable, accurate 3D characterisation of canopy fuel properties.

}, keywords = {UAS, drone, LiDAR, 3D remote sensing, TLS, fuel}, doi = {https://doi.org/10.1016/j.isprsjprs.2021.09.008}, url = {https://www.sciencedirect.com/science/article/pii/S0924271621002409}, author = {Samuel Hillman and Luke Wallace and Karin Reinke and Simon Jones} } @article {bnh-7907, title = {High-Resolution Estimates of Fire Severity - An Evaluation of UAS Image and LiDAR Mapping Approaches on a Sedgeland Forest Boundary in Tasmania, Australia }, journal = {Fire}, volume = {4}, year = {2021}, month = {03/2021}, chapter = {14}, abstract = {

With an increase in the frequency and severity of wildfires across the globe and resultant changes to long-established fire regimes, the mapping of fire severity is a vital part of monitoring ecosystem resilience and recovery. The emergence of unoccupied aircraft systems (UAS) and compact sensors (RGB and LiDAR) provide new opportunities to map fire severity. This paper conducts a comparison of metrics derived from UAS Light Detecting and Ranging (LiDAR) point clouds and UAS image based products to classify fire severity. A workflow which derives novel metrics describing vegetation structure and fire severity from UAS remote sensing data is developed that fully utilises the vegetation information available in both data sources. UAS imagery and LiDAR data were captured pre- and post-fire over a 300 m by 300 m study area in Tasmania, Australia. The study area featured a vegetation gradient from sedgeland vegetation (e.g., button grass 0.2 m) to forest (e.g., Eucalyptus obliqua and Eucalyptus globulus 50 m). To classify the vegetation and fire severity, a comprehensive set of variables describing structural, textural and spectral characteristics were gathered using UAS images and UAS LiDAR datasets. A recursive feature elimination process was used to highlight the subsets of variables to be included in random forest classifiers. The classifier was then used to map vegetation and severity across the study area. The results indicate that UAS LiDAR provided similar overall accuracy to UAS image and combined (UAS LiDAR and UAS image predictor values) data streams to classify vegetation (UAS image: 80.6\%; UAS LiDAR: 78.9\%; and Combined: 83.1\%) and severity in areas of forest (UAS image: 76.6\%, UAS LiDAR: 74.5\%; and Combined: 78.5\%) and areas of sedgeland (UAS image: 72.4\%; UAS LiDAR: 75.2\%; and Combined: 76.6\%). These results indicate that UAS SfM and LiDAR point clouds can be used to assess fire severity at very high spatial resolution.

}, keywords = {3D remote sensing, drone, fire severity, fuel structure, Lidar, photogrammetry, RPAS, structure, UAS, vegetation}, doi = {https://doi.org/10.3390/fire4010014}, url = {https://www.mdpi.com/2571-6255/4/1/14/htm}, author = {Samuel Hillman and Bryan Hally and Luke Wallace and Darren Turner and Arko Lucieer and Karin Reinke and Simon Jones} } @article {bnh-7870, title = {The influence of satellite imagery on landscape perception}, journal = {Landscape Research}, year = {2021}, month = {02/2021}, abstract = {

The perception of landscapes involves the process of categorising and differentiating surrounds according to sensory information and the experiences of individuals. Increasingly, due to the ubiquitous nature of virtual globe platforms, individuals are accessing visual information about their surrounding environment through satellite imagery. This investigation aims to examine how people{\textquoteright}s perceptions of landscapes are changing when our experiences increasingly occur in digital space, altering the perception paradigm from one where individuals analyse direct objects to one where indirect objects are key in the formation of their perceptions. A case study in Chile, South America, is used to explore the influence of satellite imagery with 52 survey participants responding to questions about land use and land cover (LULC) patterns of the area, before and after, unstructured exploration of the region using Google Earth. The results indicate that satellite imagery is influencing how individuals perceive LULC patterns within their direct surroundings.

}, keywords = {Google Earth, land cover, land use, landscape patterns, Landscape perception, satellite imagery}, doi = {https://doi.org/10.1080/01426397.2021.1886264}, url = {https://www.tandfonline.com/doi/abs/10.1080/01426397.2021.1886264}, author = {Daisy San Martin Saldias and Karin Reinke and Blythe McLennan and Luke Wallace} } @article {bnh-7523, title = {A comparison of terrestrial and UAS sensors for measuring fuel hazard in a dry sclerophyll forest}, journal = {International Journal of Applied Earth Observation and Geoinformation}, volume = {95}, year = {2020}, month = {11/2020}, abstract = {

In recent years, Unoccupied Aircraft Systems (UAS) have been used to capture information on forest structure in unprecedented detail. Pioneering studies in this field have shown that high spatial resolution images and Light Detecting And Ranging (LiDAR) data captured from these platforms provide detailed information describing the dominant tree elements of canopy cover and biomass. However, to date, few studies have investigated the arrangement of vegetation elements that contribute directly to fire propagation in UAS LiDAR point clouds; that is the surface, near-surface, elevated and intermediate-canopy vegetation. This paper begins to address this gap in the literature by exploring the use of image-based and LiDAR 3D representations collected using UAS platforms, for describing forest structure properties. Airborne and terrestrial 3D datasets were captured in a dry sclerophyll forest in south-eastern Australia. Results indicate that UAS LiDAR point clouds contain information that can describe fuel properties in all strata. Similar estimates of canopy cover (TLS: 68.27\% and UAS LiDAR: 64.20\%) and sub-canopy cover (Elevated cover TLS: 44.94\%, UAS LiDAR: 32.27\%, combined surface and near-surface cover TLS: 96.10\% UAS LiDAR: 93.56\%) to TLS were achieved using this technology. It was also shown that the UAS SfM photogrammetric technique significantly under performed in the representation of the canopy and below canopy structure (canopy cover - 20.31\%, elevated cover 10.09\%). This caused errors to be propagated in the estimate of heights in the elevated fuel layer (TLS: 0.51\ m, UAS LiDAR: 0.34\ m, UAS SfM: 0.15\ m). A method for classifying fuel hazard layers is also presented which identifies vegetation connectivity. These results indicate that information describing the below canopy vertical structure is present within the UAS LiDAR point clouds and can be exploited through this novel classification approach for fire hazard assessment. 
For fire prone countries, this type of information can provide important insight into forest fuels and the potential fire behaviour and impact of fire under different scenarios.

}, author = {Samuel Hillman and Luke Wallace and Arko Lucieer and Karin Reinke and Darren Turner and Simon Jones} } @article {bnh-7380, title = {An early exploration of the use of the Microsoft Azure Kinect for estimation of urban tree Diameter at Breast Height}, journal = {Remote Sensing Letters}, volume = {11}, year = {2020}, month = {09/2020}, pages = {963-972}, abstract = {
Forest and urban tree inventory measurements are increasingly adopting Remote Sensing (RS) techniques due to the accurate and rapid estimates available compared to conventional methods. The focus of this study is to assess the accuracy and potential application of the Microsoft Azure Kinect {\textendash} a lightweight depth sensor {\textendash} for outdoor measurement of tree stem Diameter at Breast Height (DBH). Individual urban trees (n\ =\ 51) were recorded from one viewing angle at a distance of 1 m to 5 m away using the various Field of View (FOV) settings on the depth sensor, from which resultant point clouds provided DBH estimates using a circle-fitting approach. The optimal capture method was observed at a distance of 2 m using the binned Near Field of View (NFOV) setting. Root Mean Square Error (RMSE) of DBH using this method was 8.43 cm; however, after removing trees with irregular or non-circular stems, this improved to 3.53 cm. Variations in ambient light were observed to have little effect on DBH estimates. The results of this study suggest when in an outdoor environment, the Azure Kinect should be used at a distance no greater than 3 m away, using the binned NFOV sensor setting, for DBH estimates.
}, keywords = {DBH, remote sensing, trees}, doi = {https://doi.org/10.1080/2150704X.2020.1802528}, url = {https://www.tandfonline.com/doi/abs/10.1080/2150704X.2020.1802528}, author = {James McGlade and Luke Wallace and Bryan Hally and Andrew White and Karin Reinke and Simon Jones} } @article {bnh-7501, title = {Quantifying fuel hazard assessments - Fuels3D annual report 2018-2019}, number = {627}, year = {2020}, month = {12/2019}, institution = {Bushfire and Natural Hazards CRC}, address = {MELBOURNE}, abstract = {

This annual report summarises the year 2019 for the Fuels3D project.\  This project was funded as half a project; the work considers the lack of repeatability and reliability with current field fuel hazard assessments. It demonstrates the precision of a semi-automated non-visual based assessments as compared to those collected through traditional visual assessment.\  The opportunity to bring together off-the-shelf, smart phone cameras and consumer grade digital cameras with advances in computer vision and photogrammetric techniques provide a cheap alternative for quantitative assessments compared to more accurate, but more expensive 3D mapping technologies (i.e. Lidar TLS).\  A tool chain and suite of computer vision and photogrammetric algorithms that use images captured in the field to produce 3D point clouds from which fuel hazard metrics are calculated. The developed technique is adaptive to 3D point clouds captured from other terrestrial technologies and can allow for changes in data collection technologies.

Highlights of 2018-2019 have included:

}, keywords = {fuel, fuels3D, hazard assessments, quantifying}, issn = {627}, author = {Bryan Hally and Karin Reinke and Luke Wallace and Simon Jones} } @article {bnh-7413, title = {Quantifying fuel hazard assessments - Fuels3D annual report 2019-2020}, number = {619}, year = {2020}, month = {10/2020}, institution = {Bushfire and Natural Hazards CRC}, address = {MELBOURNE}, abstract = {

This annual report summarises the year 2019 for the Fuels3D project.\  This project was funded as half a project; the work considers the lack of repeatability and reliability with current field fuel hazard assessments. It demonstrates the precision of a semi-automated non-visual based assessments as compared to those collected through traditional visual assessment.\  The opportunity to bring together off-the-shelf, smart phone cameras and consumer grade digital cameras with advances in computer vision and photogrammetric techniques provide a cheap alternative for quantitative assessments compared to more accurate, but more expensive 3D mapping technologies (i.e. Lidar TLS).\  A tool chain and suite of computer vision and photogrammetric algorithms that use images captured in the field to produce 3D point clouds from which fuel hazard metrics are calculated. The developed technique is adaptive to 3D point clouds captured from other terrestrial technologies and can allow for changes in data collection technologies.

Highlights of 2018-2019 have included:

}, keywords = {fuel hazard, fuels3D}, issn = {619}, author = {Bryan Hally and Karin Reinke and Luke Wallace and Simon Jones} } @article {bnh-7439, title = {Terrestrial Image-Based Point Clouds for Mapping Near-Ground Vegetation Structure: Potential and Limitations}, journal = {Fire}, volume = {3}, year = {2020}, month = {10/2020}, chapter = {59}, abstract = {

Site-specific information concerning fuel hazard characteristics is needed to support wildfire management interventions and fuel hazard reduction programs. Currently, routine visual assessments provide subjective information, with the resulting estimate of fuel hazard varying due to observer experience and the rigor applied in making assessments. Terrestrial remote sensing techniques have been demonstrated to be capable of capturing quantitative information on the spatial distribution of biomass to inform fuel hazard assessments. This paper explores the use of image-based point clouds generated from imagery captured using a low-cost compact camera for describing the fuel hazard within the surface and near-surface layers. Terrestrial imagery was obtained at three distances for five target plots. Subsets of these images were then processed to determine the effect of varying overlap and distribution of image captures. The majority of the point clouds produced using this image-based technique provide an accurate representation of the 3D structure of the surface and near-surface fuels. Results indicate that high image overlap and pixel size are critical; multi-angle image capture is shown to be crucial in providing a representation of the vertical stratification of fuel. Terrestrial image-based point clouds represent a viable technique for low cost and rapid assessment of fuel structure.

}, keywords = {Structure from Motion, vegetation structure, fuel hazard, Terrestrial Laser Scanning}, doi = {https://doi.org/10.3390/fire3040059}, url = {https://www.mdpi.com/2571-6255/3/4/59/htm}, author = {Luke Wallace and Bryan Hally and Samuel Hillman and Simon Jones and Karin Reinke} } @article {bnh-5481, title = {Assessing the ability of image based point clouds captured from a UAV to measure the terrain in the presence of canopy cover}, journal = {Forests}, volume = {10}, year = {2019}, month = {04/2019}, abstract = {

Point clouds captured from Unmanned Aerial Systems are increasingly relied upon to provide information describing the structure of forests. The quality of the information derived from these point clouds is dependent on a range of variables, including the type and structure of the forest, weather conditions and flying parameters. A key requirement to achieve accurate estimates of height based metrics describing forest structure is a source of ground information. This study explores the availability and reliability of ground surface points available within point clouds captured in six forests of different structure (canopy cover and height), using three image capture and processing strategies, consisting of nadir, oblique and composite nadir/oblique image networks. The ground information was extracted through manual segmentation of the point clouds as well as through the use of two commonly used ground filters, LAStools lasground and the Cloth Simulation Filter. The outcomes of these strategies were assessed against ground control captured with a Total Station. Results indicate that a small increase in the number of ground points captured (between 0 and 5\% of a 10 m radius plot) can be achieved through the use of a composite image network. In the case of manually identified ground points, this reduced the root mean square error (RMSE) error of the terrain model by between 1 and 11 cm, with greater reductions seen in plots with high canopy cover. The ground filters trialled were not able to exploit the extra information in the point clouds and inconsistent results in terrain RMSE were obtained across the various plots and imaging network configurations. The use of a composite network also provided greater penetration into the canopy, which is likely to improve the representation of mid-canopy elements.

}, keywords = {drones, Fire, forest measurement, image based point clouds, RPAS, structure from motion, UAS}, doi = {https://doi.org/10.3390/f10030284}, url = {https://www.mdpi.com/1999-4907/10/3/284}, author = {Luke Wallace and Chris Bellman and Bryan Hally and Jaime Hernandez and Simon Jones and Samuel Hillman} } @conference {bnh-6527, title = {Fuels3D: barking up the wrong tree and beyond}, booktitle = {AFAC19 powered by INTERSCHUTZ - Bushfire and Natural Hazards CRC Research Forum}, year = {2019}, month = {12/2019}, publisher = {Australian Institute for Disaster Resilience}, organization = {Australian Institute for Disaster Resilience}, address = {Melbourne}, abstract = {

Improvement of the understanding of how fuel characteristics correlate with fire behaviour and severity is critical to the ongoing handling of risk and recovery in fire-prone environments. Current standards and protocols for describing fuel hazard (for example, {\textquoteleft}Overall Fuel Hazard Assessment Guide{\textquoteright}, Victorian Department of Sustainability and Environment) and post-burn severity (for example, {\textquoteleft}Fire Severity Assessment Guide{\textquoteright}, Victorian Department of Sustainability and Environment) were written for collection of information in the field. The data collected are largely subjective descriptions of the landscape. The ability of information from these assessment techniques to be adapted to modern risk assessment tools such as fire behavior models, or for the calibration and validation of datasets, is limited. Quantitative data-rich methods of measuring and assessing fuel load and structure are the missing link between the knowledge of land management personnel in the field, and the model drivers and decision makers at organizational level.

Handheld devices with high quality sensors, in the form of off-the-shelf cameras, are increasingly ubiquitous, as is the availability of 3D point cloud data collected from active sensing instruments on terrestrial and aerial platforms. Rapid and comprehensive capture of information by these devices, coupled with the use of computer vision techniques, allows for the 3D description of the surrounding environment to be exploited to provide robust measurement of metrics that can be built into existing fuel hazard assessment frameworks. Providing key metrics as data products rather than a single product enables flexibility across jurisdictions and ecosystem types, and capacity to adapt as end-user requirements change.

The Fuels3D project has created a suite of tools and methods for image capture in the field during fuel hazard assessments. 3D point clouds are generated using computer vision and photogrammetry techniques. From these 3D point clouds, scale is added, and decision rules are programmed to calculate quantifiable surface / near-surface metrics that replicate those
used in current fuel hazard visual assessment guides. Case studies are highlighted here.

Download the full non-peer reviewed research proceedings\ from the Bushfire and Natural Hazards CRC Research Forum here.

}, keywords = {data collection, Fire behaviour, fuel hazard, risk management, technology}, url = {https://knowledge.aidr.org.au/resources/australian-journal-of-emergency-management-monograph-series/}, author = {Karin Reinke and Luke Wallace and Samuel Hillman and Bryan Hally and Simon Jones} } @article {bnh-6222, title = {A Method for Validating the Structural Completeness of Understory Vegetation Models Captured with 3D Remote Sensing}, journal = {Remote Sensing}, volume = {11}, year = {2019}, month = {09/2019}, abstract = {

Characteristics describing below canopy vegetation are important for a range of forest ecosystem applications including wildlife habitat, fuel hazard and fire behaviour modelling, understanding forest recovery after disturbance and competition dynamics. Such applications all rely on accurate measures of vegetation structure. Inherent in this is the assumption or ability to demonstrate measurement accuracy. 3D point clouds are being increasingly used to describe vegetated environments, however limited research has been conducted to validate the information content of terrestrial point clouds of understory vegetation. This paper describes the design and use of a field frame to co-register point intercept measurements with point cloud data to act as a validation source. Validation results show high correlation of point matching in forests with understory vegetation elements with large mass and/or surface area, typically consisting of broad leaves, twigs and bark 0.02 m diameter or greater in size (SfM, MCC 0.51{\textendash}0.66; TLS, MCC 0.37{\textendash}0.47). In contrast, complex environments with understory vegetation elements with low mass and low surface area showed lower correlations between validation measurements and point clouds (SfM, MCC 0.40 and 0.42; TLS, MCC 0.25 and 0.16). The results of this study demonstrate that the validation frame provides a suitable method for comparing the relative performance of different point cloud generation processes

}, keywords = {3D remote sensing, biomass, forest measurement, structure from motion, terrestrial laser scanning, validation, vegetation structure}, doi = {https://doi.org/10.3390/rs11182118}, url = {https://www.mdpi.com/2072-4292/11/18/2118}, author = {Samuel Hillman and Luke Wallace and Karin Reinke and Bryan Hally and Simon Jones and Daisy Saldias} } @article {bnh-5136, title = {Advances in active fire detection using a multi-temporal method for next-generation geostationary satellite data}, journal = {International Journal of Digital Earth}, year = {2018}, month = {07/2018}, abstract = {

A vital component of fire detection from remote sensors is the accurate estimation of the background temperature of an area in fire{\textquoteright}s absence, assisting in identification and attribution of fire activity. New geostationary sensors increase the data available to describe background temperature in the temporal domain. Broad area methods to extract the expected diurnal cycle of a pixel using this temporally rich data have shown potential for use in fire detection. This paper describes an application of a method for priming diurnal temperature fitting of imagery from the Advanced Himawari Imager. The BAT method is used to provide training data for temperature fitting of target pixels, to which thresholds are applied to detect thermal anomalies in 4 μm imagery over part of Australia. Results show the method detects positive thermal anomalies with respect to the diurnal model in up to 99\% of cases where fires are also detected by Low Earth Orbiting (LEO) satellite active fire products. In absence of LEO active fire detection, but where a burned area product recorded fire-induced change, this method also detected anomalous activity in up to 75\% of cases. Potential improvements in detection time of up to 6 h over LEO products are also demonstrated.

}, doi = {https://doi.org/10.1080/17538947.2018.1497099}, url = {https://www.tandfonline.com/doi/abs/10.1080/17538947.2018.1497099?journalCode=tjde20}, author = {Bryan Hally and Luke Wallace and Karin Reinke and Simon Jones and Andrew Skidmore} } @article {bnh-5137, title = {Estimating fire background temperature at a geostationary scale - an evaluation of contextual methods for AHI-8}, journal = {Remote Sensing}, volume = {10}, year = {2018}, month = {08/2018}, chapter = {1368}, abstract = {

An integral part of any remotely sensed fire detection and attribution method is an estimation of the target pixel{\textquoteright}s background temperature. This temperature cannot be measured directly independent of fire radiation, so indirect methods must be used to create an estimate of this background value. The most commonly used method of background temperature estimation is through derivation from the surrounding obscuration-free pixels available in the same image, in a contextual estimation process. This method of contextual estimation performs well in cloud-free conditions and in areas with homogeneous landscape characteristics, but increasingly complex sets of rules are required when contextual coverage is not optimal. The effects of alterations to the search radius and sample size on the accuracy of contextually derived brightness temperature are heretofore unexplored. This study makes use of imagery from the AHI-8 geostationary satellite to examine contextual estimators for deriving background temperature, at a range of contextual window sizes and percentages of valid contextual information. Results show that while contextual estimation provides accurate temperatures for pixels with no contextual obscuration, significant deterioration of results occurs when even a small portion of the target pixel{\textquoteright}s surroundings are obscured. To maintain the temperature estimation accuracy, the use of no less than 65\% of a target pixel{\textquoteright}s total contextual coverage is recommended. The study also examines the use of expanding window sizes and their effect on temperature estimation. 
Results show that the accuracy of temperature estimation decreases significantly when expanding the examined window, with a 50\% increase in temperature variability when using a larger window size than\ 5{\texttimes}5 pixels, whilst generally providing limited gains in the total number of temperature estimates (between 0.4\%{\textendash}4.4\% of all pixels examined). The work also presents a number of case study regions taken from the AHI-8 disk in more depth, and examines the causes of excess temperature variation over a range of topographic and land cover conditions.

}, doi = {10.3390/rs10091368}, url = {https://www.mdpi.com/2072-4292/10/9/1368}, author = {Bryan Hally and Luke Wallace and Karin Reinke and Simon Jones and Chermelle Engel and Andrew Skidmore} } @article {bnh-5338, title = {Estimating Fire Background Temperature at a Geostationary Scale{\textemdash}An Evaluation of Contextual Methods for AHI-8}, journal = {Remote Sensing}, volume = {10}, year = {2018}, month = {08/2018}, chapter = {1368}, abstract = {

An integral part of any remotely sensed fire detection and attribution method is an estimation of the target pixel{\textquoteright}s background temperature. This temperature cannot be measured directly independent of fire radiation, so indirect methods must be used to create an estimate of this background value. The most commonly used method of background temperature estimation is through derivation from the surrounding obscuration-free pixels available in the same image, in a contextual estimation process. This method of contextual estimation performs well in cloud-free conditions and in areas with homogeneous landscape characteristics, but increasingly complex sets of rules are required when contextual coverage is not optimal. The effects of alterations to the search radius and sample size on the accuracy of contextually derived brightness temperature are heretofore unexplored. This study makes use of imagery from the AHI-8 geostationary satellite to examine contextual estimators for deriving background temperature, at a range of contextual window sizes and percentages of valid contextual information. Results show that while contextual estimation provides accurate temperatures for pixels with no contextual obscuration, significant deterioration of results occurs when even a small portion of the target pixel{\textquoteright}s surroundings are obscured. To maintain the temperature estimation accuracy, the use of no less than 65\% of a target pixel{\textquoteright}s total contextual coverage is recommended. The study also examines the use of expanding window sizes and their effect on temperature estimation. 
Results show that the accuracy of temperature estimation decreases significantly when expanding the examined window, with a 50\% increase in temperature variability when using a larger window size than\ 5{\texttimes}5 pixels, whilst generally providing limited gains in the total number of temperature estimates (between 0.4\%{\textendash}4.4\% of all pixels examined). The work also presents a number of case study regions taken from the AHI-8 disk in more depth, and examines the causes of excess temperature variation over a range of topographic and land cover conditions.

}, keywords = {contextual methods, fire attribution, fire background temperature, geostationary sensors}, doi = {10.3390/rs10091368}, url = {https://www.mdpi.com/2072-4292/10/9/1368}, author = {Bryan Hally and Luke Wallace and Karin Reinke and Simon Jones and Chermelle Engel and Andrew Skidmore} } @conference {bnh-4778, title = {Experiences in the in-field utilisation of fuels3D}, booktitle = {AFAC18}, year = {2018}, month = {09/2018}, publisher = {Bushfire and Natural Hazards CRC}, organization = {Bushfire and Natural Hazards CRC}, address = {Perth}, abstract = {

Fuels3D provides a rapid method to collect quantified information describing fuel hazard using a smartphone. The method requires users to collect a number of photos along a transect within a fuel hazard environment. The photos are processed using photogrammetric algorithms to provide a three-dimensional representation of the fuel, and subsequently estimates of fuel hazard metrics including fuel height, cover and fate (dead/alive). This paper reports on the initial large scale utilisation trial of the Fuels3D fuel hazard workflow. Project end-users from Victoria, South Australia and ACT were provided with a smartphone app (iOS or Android) that allowed photos to be easily collected following the Fuels3D method. End-users were instructed to collect samples within a variety of fuel types and hazards in order to test the potential and limitations of the app.\  These photos were transferred utilising the cloudstor research infrastructure to a processing PC, where estimates of fuel hazard metrics were derived and reported back to end-users.\  Initial results of this trial indicate that Fuels3D is capable of quantified estimates of fuel hazard metrics that are more precise than those achieved with visual fuel hazard assessments.

}, author = {Luke Wallace and Karin Reinke and Simon Jones and Samuel Hillman and Adam J. Leavesley and Simeon Telfer and Ian Thomas} } @article {bnh-5091, title = {Fuels3D: annual project report 2017-18}, year = {2018}, month = {01/2019}, institution = {Bushfire and Natural Hazards CRC}, abstract = {

Smartphone technology with high quality sensors is becoming increasingly ubiquitous. This technology, when combined with computer vision methods allows for the 3D description of the surrounding environment. This project exploits this technology to provide robust measurement of vegetation structural metrics that can be built into existing fuel hazard assessment frameworks.\ \ Providing key metrics as data products rather than a single end product enables flexibility across jurisdictions and ecosystem types, and capacity to adapt as end-user requirements change.

}, issn = {428}, author = {Karin Reinke and Simon Jones and Luke Wallace} } @article {bnh-5138, title = {Implementation of a new algorithm resulting in improvements in accuracy and resolution of SEVIRI hotspot products}, journal = {Remote Sensing Letters}, volume = {9}, year = {2018}, month = {07/2018}, abstract = {

Active wildfire detection, surveillance and mapping is an important application of satellite remote sensing. The Active Fire Monitoring (FIR) products, from the Spinning Enhanced Visible and Infrared Imager (SEVIRI) on board the Meteosat Second Generation (MSG) satellites, provide rapid-fire detection data every 5 to 15 minutes over the European and African continents. However, the real world application of this high temporal frequency data is hindered due to the product spatial resolution of 3\ {\texttimes}\ 3\ km, thus limiting the application in fire surveillance and mapping activities. This letter implements a modified version of the Advanced Himawari-8 Imager {\textendash} Fire Surveillance Algorithm (AHI-FSA) for SEVIRI with the aim of improving the spatial resolution of fire activity mapping. Initial results demonstrate the algorithm was able to improve the resolution of fire detection from 3\ {\texttimes}\ 3\ km to 1\ {\texttimes}\ 1\ km and simultaneously reduce the commission and omission errors by 25\% and 16\% respectively.

}, doi = {10.1080/2150704X.2018.1484955}, url = {https://www.tandfonline.com/doi/abs/10.1080/2150704X.2018.1484955?journalCode=trsl20}, author = {Chathura Wickramasinghe and Luke Wallace and Karin Reinke and Simon Jones} } @article {bnh-5139, title = {Intercomparison of Himawari-8 AHI-FSA with MODIS and VIIRS active fire products}, journal = {International Journal of Digital Earth}, year = {2018}, month = {09/2018}, abstract = {

The AHI-FSA (Advanced Himawari Imager - Fire Surveillance Algorithm) is a recently developed algorithm designed to support wildfire surveillance and mapping using the geostationary Himawari-8 satellite. At present, the AHI-FSA algorithm has only been tested on a number of case study fires in Western Australia. Initial results demonstrate potential as a wildfire surveillance algorithm providing high frequency (every 10 minutes), multi-resolution fire-line detections. This paper intercompares AHI-FSA across the Northern Territory of Australia (1.4 million km2) over a ten-day period with the well-established fire products from LEO (Low Earth Orbiting) satellites: MODIS (Moderate Resolution Imaging Spectroradiometer) and VIIRS (Visible Infrared Imaging Radiometer Suite). This paper also discusses the difficulties and solutions when comparing high temporal frequency fire products with existing low temporal resolution LEO satellite products. The results indicate that the multi-resolution approach developed for AHI-FSA is successful in mapping fire activity at 500 m. When compared to the MODIS, daily AHI-FSA omission error was only 7\%. High temporal frequency data also results in AHI-FSA observing fires, at times, three hours before the MODIS overpass with much-enhanced detail on fire movement.

}, doi = {10.1080/17538947.2018.1527402}, url = {https://www.tandfonline.com/doi/abs/10.1080/17538947.2018.1527402?journalCode=tjde20}, author = {Chathura Wickramasinghe and Luke Wallace and Karin Reinke and Simon Jones} } @article {bnh-3514, title = {A Broad-Area Method for the Diurnal Characterisation of Upwelling Medium Wave Infrared Radiation}, journal = {Remote Sensing}, volume = {9}, year = {2017}, month = {02/2017}, chapter = {167}, abstract = {

Fire detection from satellite sensors relies on an accurate estimation of the unperturbed state of a target pixel, from which an anomaly can be isolated. Methods for estimating the radiation budget of a pixel without fire depend upon training data derived from the location{\textquoteright}s recent history of brightness temperature variation over the diurnal cycle, which can be vulnerable to cloud contamination and the effects of weather. This study proposes a new method that utilises the common solar budget found at a given latitude in conjunction with an area{\textquoteright}s local solar time to aggregate a broad-area training dataset, which can be used to model the expected diurnal temperature cycle of a location. This training data is then used in a temperature fitting process with the measured brightness temperatures in a pixel, and compared to pixel-derived training data and contextual methods of background temperature determination. Results of this study show similar accuracy between clear-sky medium wave infrared upwelling radiation and the diurnal temperature cycle estimation compared to previous methods, with demonstrable improvements in processing time and training data availability. This method can be used in conjunction with brightness temperature thresholds to provide a baseline for upwelling radiation, from which positive thermal anomalies such as fire can be isolated.

}, doi = {10.3390/rs9020167}, url = {http://www.mdpi.com/2072-4292/9/2/167}, author = {Bryan Hally and Luke Wallace and Karin Reinke and Simon Jones} } @article {bnh-4197, title = {Disaster landscape attribution: annual report 2016-17}, number = {313}, year = {2017}, month = {09/2017}, institution = {Bushfire and Natural Hazards CRC}, address = {Melbourne}, abstract = {

What is the problem?

Monitoring bushfires requires timely information on their early detection, location, intensity and configuration. Their management requires timely information on fuel hazard condition and the efficacy of fuel reduction measures. This project seeks to use remote sensing to acquire this information at multiple spatial scales.

Why is it important?

By enhancing the timeliness and accuracy of observations and measurements of bushfire threatened and affected landscapes, our mitigation activities and response capacities are further strengthened. The provision of quantitative fire severity assessments informs the way in which we protect against the increasing threat of bushfire and inform our immediate to long-term recovery and rehabilitation efforts in response to bushfire events.

How are we going to solve it?

Our project is evaluating and validating current satellite based remote sensing options for active fire detection and surveillance. Using simulations and real world experiments we are determining the accuracy with which fires can be detected, their temperature and shape determined, for a range of landscapes. Our project is also creating new techniques and protocols for the rapid attribution of fire landscapes (pre- and post-fire). These techniques seek to add quantitative rigour to existing fuel hazard estimation practices.

How have we done?

This project brings together researchers from around the world including RMIT, the German Aerospace Agency DLR, CSIRO, the University of Twente in the Netherlands, Geoscience Australia and the Bureau of Meteorology. The project attributes fire landscapes using the latest satellite based thermal earth observation systems for active fire surveillance. Structure from Motion (SfM) and Terrestrial Laser Scanning (TLS) technologies and techniques are used to quantify and map changes in the landscape before, and after, a fire event. This report provides a background to the project and discusses the key research questions being asked and describes the progress made. Key achievements over the last year are described and linked to research outputs and end user engagement and operations. The report concludes with activities planned for the year ahead and a list of currently integrated project members.

Highlights of 2016-2017 have included:

}, issn = {313}, author = {Simon Jones and Karin Reinke and Luke Wallace} } @article {bnh-4156, title = {Emerging technologies for estimating fuel hazard}, number = {335}, year = {2017}, month = {10/2017}, institution = {Bushfire and Natural Hazards CRC}, address = {Melbourne}, abstract = {

The increasing risk of wildfire resulting from climate change has demanded an increase in information to support mitigation, response and recovery activities\ by fire management agencies. Subsequently, there is a need for an ongoing review of currently available and on-the-horizon information and technology.

Fire has important national significance as Australia faces ongoing\ environmental issues including loss of biodiversity, increasing urbanisation into bushland environments and increasing risks of wildfire. Fire regimes are an integral part of the ecosystem processes of Australian forests and a prominent disturbance factor. It affects successional rates of ecosystems, species diversity, can increase habitat fragmentation and alter landscape functioning.\ At the same time, fire is an important tool in management for ecosystem health and is frequently used for fuel hazard reduction.

Remote sensing data can assist fire management at three stages relative to fire occurrence including (i) Before the fire (fuel hazard measures, time since last burn) to assist fire prevention or minimisation activities, (ii) During the fire (near real-time detection and location of active fire areas and (iii) After the fire (mapping and assessment of burned areas).\ This report focuses on the use of sensing technology for generating a 3D representation of a feature or landscape. It examines the potential of emerging technology for measuring the structure and amount of vegetation within the landscape pre and post fire. Initial assessment of the sensing technology for mapping these environments is made based on the stage of maturity, sampling area, estimate accuracy and the expertise required to operate.

}, issn = {335}, author = {Luke Wallace and Karin Reinke and Simon Jones} } @conference {bnh-3892, title = {Enhanced estimation of background temperature for fire detection using new geostationary sensors}, booktitle = {AFAC17}, year = {2017}, month = {09/2017}, publisher = {Bushfire and Natural Hazards CRC}, organization = {Bushfire and Natural Hazards CRC}, address = {Sydney}, abstract = {

Recent increases in the frequency and intensity of active fire has heightened the importance of remote sensing as a source of early warning information for fire incidents. The launches of new geostationary sensors, such as Himawari-8 over the Asia-Pacific, have vastly increased the information available with which to detect and attribute these incidents, with observations every 10 minutes possible over key parts of the electromagnetic spectrum (3.8 -- 4{\textmu}m). Remotely sensed fire products such as Sentinel Hotspots and the MODIS active fire product have focussed upon use of contextually derived background temperatures for isolating hotspots, dictated by the low temporal frequency of available images. This research proposes a new paradigm in fire detection, which utilises the increased temporal resolutions of geostationary sensor imagery to provide a baseline dataset for land surface temperature estimation based upon location and time of day. To achieve this, a multi-temporal diurnal characterisation of temperature is calculated for each pixel based upon a large area latitudinal transect. Hot spot anomalies are then identified based upon the deviation of the location{\textquoteright}s temperature from the expected diurnal cycle. Validation of the fire detection algorithm has focussed upon case study fires from the 2016/17 fire season, by way of inter-comparison with commonly used MODIS and VIIRS active fire products, and a burned area product. Results show increased capability for early fire detection using the new algorithm in comparison to traditional single image contextual algorithms employed for polar orbiting systems. Other advantages include notable resilience to sources of occlusion such as cloud and smoke. Further research will focus on the wider application of this method across the Australian continent and methods for countering more challenging detection conditions.

}, author = {Bryan Hally and Luke Wallace and Karin Reinke and Chathura Wickramasinghe and Simon Jones} } @article {bnh-5074, title = {Investigating surface and near-surface bushfire fuel attributes: a comparison between visual assessments and image-based point clouds}, journal = {Sensors}, volume = {17}, year = {2017}, month = {04/2017}, chapter = {910}, abstract = {

Visual assessment, following guides such as the Overall Fuel Hazard Assessment Guide (OFHAG), is a common approach for assessing the structure and hazard of varying bushfire fuel layers. Visual assessments can be vulnerable to imprecision due to subjectivity between assessors, while emerging techniques such as image-based point clouds can offer land managers potentially more repeatable descriptions of fuel structure. This study compared the variability of estimates of surface and near-surface fuel attributes generated by eight assessment teams using the OFHAG and Fuels3D, a smartphone method utilising image-based point clouds, within three assessment plots in an Australian lowland forest. Surface fuel hazard scores derived from underpinning attributes were also assessed. Overall, this study found considerable variability between teams on most visually assessed variables, resulting in inconsistent hazard scores. Variability was observed within point cloud estimates but was, however, on average two to eight times less than that seen in visual estimates, indicating greater consistency and repeatability of this method. It is proposed that while variability within the Fuels3D method may be overcome through improved methods and equipment, inconsistencies in the OFHAG are likely due to the inherent subjectivity between assessors, which may be more difficult to overcome. This study demonstrates the capability of the Fuels3D method to efficiently and consistently collect data on fuel hazard and structure, and, as such, this method shows potential for use in fire management practices where accurate and reliable data is essential.

}, doi = {10.3390/s17040910}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5426834/}, author = {Christine Spits and Luke Wallace and Karin Reinke} } @conference {bnh-3913, title = {Mapping the efficacy of an Australian fuel reduction burn using Fuels3D point clouds}, booktitle = {AFAC17}, year = {2017}, month = {09/2017}, publisher = {Bushfire and Natural Hazards CRC}, organization = {Bushfire and Natural Hazards CRC}, address = {Sydney}, abstract = {

Fuel reduction burns are commonly used in fire-prone forests to reduce the risk of wildfire and increase ecosystem resilience. As such producing quantified assessments of fire-induced change is important to understanding the success of the intervention. Remote sensing has also been employed for assessing fuel hazard and fire severity. Satellite, airborne and UAV remote sensing, for example, have shown potential for assessing the effects of large wildfires and fuel hazard in areas of open canopy. Fuel reduction burns, however, often take place under dense canopy and result in little or no change to the canopy cover. As such terrestrial techniques are needed to quantify the efficacy of these burns.

This study presents a case study on the use of image based point clouds, captured terrestrially following the fuels3D methodology outlined in Wallace et al. (2016), for describing the change in fuel structure induced by a low intensity fuel reduction burn. The specific objectives of this study were to evaluate whether fuel structure maps produced from fuels3D point clouds are sensitive to the changes that occur during a low intensity fuel reduction burn, and how these changes may be quantified.

}, author = {Luke Wallace and Karin Reinke and Simon Jones and Bryan Hally and Samuel Hillman and Christine Spits} } @article {bnh-5095, title = {Non-destructive estimation of above-ground surface and near-surface biomass using 3D terrestrial remote sensing techniques}, journal = {Methods in Ecology and Evolution}, volume = {8}, year = {2017}, month = {02/2017}, abstract = {
  1. Quantitative measurements of above-ground vegetation biomass are vital to a range of ecological and natural resource management applications. Remote-sensing techniques, such as terrestrial laser scanning (TLS) and image-based point clouds, are potentially revolutionary techniques for measuring vegetation biomass and deriving other related, structural metrics for these purposes.
  2. Surface vegetation biomass (up to 25\ cm) in pasture, forest, and woodland environments is estimated from a 3D point cloud derived from a small number of digital images. Volume is calculated, using the 3D cloud and regressed against dry weight to provide an estimate of biomass. Assessment of the method is made through comparison to 3D point clouds collected through TLS surveys.
  3. High correlation between destructively sampled biomass and vegetation volume derived from TLS and image-based point clouds in the pasture (TLS\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0001, image based\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0002), dry grassy forest (TLS\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0003, image based\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0004) and lowland forest (TLS\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0005, image based\ urn:x-wiley:2041210X:media:mee312759:mee312759-math-0006) environments was found. Occlusion caused by standing vegetation in the woodland environment resulted in moderate correlation between TLS derived volume and biomass (urn:x-wiley:2041210X:media:mee312759:mee312759-math-0007). The effects of surrounding vegetation on the image-based technique resulted in 3D point clouds being resolved for only 40\% of the samples in this environment.
  4. The results of this study demonstrate that image-based point cloud techniques are highly viable for the measurement of surface biomass. In contrast to TLS, volume and biomass data can be captured using low-cost equipment and relatively little expertise.

}, doi = {10.1111/2041-210X.12759}, url = {https://besjournals.onlinelibrary.wiley.com/doi/abs/10.1111/2041-210X.12759}, author = {Luke Wallace and Samuel Hillman and Karin Reinke and Bryan Hally} } @article {bnh-3436, title = {An Assessment of Pre- and Post Fire Near Surface Fuel Hazard in an Australian Dry Sclerophyll Forest Using Point Cloud Data Captured Using a Terrestrial Laser Scanner}, journal = {Remote Sensing}, volume = {8}, year = {2016}, month = {08/2016}, abstract = {

Assessment of ecological and structural changes induced by fire events is important for understanding the effects of fire, and planning future ecological and risk mitigation strategies. This study employs Terrestrial Laser Scanning (TLS) data captured at multiple points in time to monitor the changes in a dry sclerophyll forest induced by a prescribed burn. Point cloud data was collected for two plots; one plot undergoing a fire treatment, and the second plot remaining untreated, thereby acting as the control. Data was collected at three epochs (pre-fire, two weeks post fire and two years post fire). Coregistration of these multitemporal point clouds to within an acceptable tolerance was achieved through a two step process utilising permanent infield markers and manually extracted stem objects as reference targets. Metrics describing fuel height and fuel fragmentation were extracted from the point clouds for direct comparison with industry standard visual assessments. Measurements describing the change (or lack thereof) in the control plot indicate that the method of data capture and coregistration were achieved with the required accuracy to monitor fire induced change. Results from the fire affected plot show that immediately post fire 67\% of area had been burnt with the average fuel height decreasing from 0.33 to 0.13 m. At two years post-fire the fuel remained significantly lower (0.11 m) and more fragmented in comparison to pre-fire levels. Results in both the control and fire altered plot were comparable to synchronous onground visual assessment. The advantage of TLS over the visual assessment method is, however, demonstrated through the use of two physical and spatially quantifiable metrics to describe fuel change. These results highlight the capabilities of multitemporal TLS data for measuring and mapping changes in the three dimensional structure of vegetation. 
Metrics from point clouds can be derived to provide quantified estimates of surface and near-surface fuel loss and accumulation, and inform prescribed burn efficacy and burn severity reporting.

}, doi = {10.3390/rs8080679}, url = {http://www.mdpi.com/2072-4292/8/8/679}, author = {Luke Wallace and Vaibhav Gupta and Karin Reinke and Simon Jones} } @article {bnh-3538, title = {Development of a Multi-Spatial Resolution Approach to the Surveillance of Active Fire Lines Using Himawari-8}, journal = {Remote Sensing}, volume = {8}, year = {2016}, month = {11/2016}, chapter = {932}, abstract = {

Satellite remote sensing is regularly used for wildfire detection, fire severity mapping and burnt area mapping. Applications in the surveillance of wildfire using geostationary-based sensors have been limited by low spatial resolutions. With the launch in 2015 of the AHI (Advanced Himawari Imaginer) sensor on board Himawari-8, ten-minute interval imagery is available covering an entire earth hemisphere across East Asia and Australasia. Existing active fire detection algorithms depend on middle infrared (MIR) and thermal infrared (TIR) channels to detect fire. Even though sub-pixel fire detection algorithms can detect much smaller fires, the location of the fire within the AHI 2 {\texttimes} 2 km (400 ha) MIR/TIR pixel is unknown. This limits the application of AHI as a wildfire surveillance and tracking sensor. A new multi-spatial resolution approach is presented in this paper that utilizes the available medium resolution channels in AHI. The proposed algorithm is able to map firelines at a 500 m resolution. This is achieved using near infrared (NIR) (1 km) and RED (500 m) data to detect burnt area and smoke within the flagged MIR (2 km) pixel. Initial results based on three case studies carried out in Western Australia shows that the algorithm was able to continuously track fires during the day at 500 m resolution. The results also demonstrate the utility for wildfire management activities.

}, doi = {10.3390/rs8110932}, url = {http://www.mdpi.com/2072-4292/8/11/932}, author = {Chathura Wickramasinghe and Simon Jones and Karin Reinke and Luke Wallace} } @article {bnh-2913, title = {Disaster landscape attribution: fire surveillance and hazard mapping, data scaling and validation: Annual project report}, number = {172}, year = {2016}, month = {08/2016}, institution = {Bushfire and Natural Hazards CRC}, address = {Melbourne}, abstract = {

This project attributes fire landscapes using the latest satellite based thermal earth observation systems for active fire surveillance. 3D remote sensing technologies have been trialed, and Structure from Motion (SfM) and Terrestrial Laser Scanning (TLS) technologies and techniques used to quantify and map changes in the landscape before, and after, a fire event. The project brings together researchers from around the world including RMIT, the German Aerospace Agency DLR, CSIRO, the University of Twente in the Netherlands, Geoscience Australia and the Bureau of Meteorology.

Combining these two aspects of this project allows for remote sensing to become a key tool in mitigating the risk of disaster caused by wildfire. Results demonstrated from the work completed in this project with feedback from project end-users highlight the potential for remote sensing tools to contribute to existing landscape management processes.\ This report provides a background to the project, discusses the key research questions being asked and describes the progress made. Key achievements over the last year are described and linked to research outputs and end user engagement and operations. The report concludes with activities planned for the year ahead and a list of currently integrated project members.

Highlights of 2015-2016 have included:

A workshop attended by local and interstate end users was held in December 2015 resulting in preliminary testing and data collection by end users in Victoria and South Australia. Two field\ days showcasing Fuels3D and seeking end user feedback have been scheduled for two days in July 2016 with participants attending from SA DEWNR, ACT Parks and Wildlife, Vic DELWP, Vic CFA, Parks Victoria and Melbourne Water.

}, issn = {172}, author = {Simon Jones and Karin Reinke and Luke Wallace} } @article {bnh-1893, title = {Assessing Metrics for Estimating Fire Induced Change in the Forest Understorey Structure Using Terrestrial Laser Scanning}, journal = {Remote Sensing}, volume = {7}, year = {2015}, month = {06/2015}, pages = {8180-8201}, chapter = {8180}, abstract = {

Quantifying post-fire effects in a forested landscape is important to ascertain burn severity, ecosystem recovery and post-fire hazard assessments and mitigation planning. Reporting of such post-fire effects assumes significance in fire-prone countries such as USA, Australia, Spain, Greece and Portugal where prescribed burns are routinely carried out. This paper describes the use of Terrestrial Laser Scanning (TLS) to estimate and map change in the forest understorey following a prescribed burn. Eighteen descriptive metrics are derived from bi-temporal TLS which are used to analyse and visualise change in a control and fire-altered plot. Metrics derived are Above Ground Height-based (AGH) percentiles and heights, point count and mean intensity. Metrics such as\ AGH50change,\ mean AGHchange\ and\ point countchange\ are sensitive enough to detect subtle fire-induced change (28\%{\textendash}52\%) whilst observing little or no change in the control plot (0{\textendash}4\%). A qualitative examination with field measurements of the spatial distribution of burnt areas and percentage area burnt also show similar patterns. This study is novel in that it examines the behaviour of TLS metrics for estimating and mapping fire induced change in understorey structure in a single-scan mode with a minimal fixed reference system. Further, the TLS-derived metrics can be used to produce high resolution maps of change in the understorey landscape

}, keywords = {Terrestrial LiDAR; change detection; understorey; prescribed burns; terrestrial laser scanning; single-scan; LiDAR metrics}, doi = {10.3390/rs70608180}, url = {http://www.mdpi.com/2072-4292/7/6/8180}, author = {Vaibhav Gupta and Karin Reinke and Simon Jones and Luke Wallace and Lucas Holden} }