Chris Schneider

Chris is Google's ÜTL for Machine Learning Security in Android. His work includes Platform, Application, and Data security initiatives across Android, Core, and PSS; he is also a security research partner with Google DeepMind.

His responsibilities include establishing secure-by-default implementation of Android software, and remediating emerging security gaps from ML-enabled technologies. He is a primary partner for Google's SAIF initiative.
More by Chris Schneider
WP_Query Object
(
    [query] => Array
        (
            [post_type] => Array
                (
                    [0] => post
                    [1] => webinars
                )

            [posts_per_page] => -1
            [post_status] => publish
            [meta_query] => Array
                (
                    [relation] => OR
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "177"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "177"
                            [compare] => LIKE
                        )

                )

        )

    [query_vars] => Array
        (
            [post_type] => Array
                (
                    [0] => post
                    [1] => webinars
                )

            [posts_per_page] => -1
            [post_status] => publish
            [meta_query] => Array
                (
                    [relation] => OR
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "177"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "177"
                            [compare] => LIKE
                        )

                )

            [error] => 
            [m] => 
            [p] => 0
            [post_parent] => 
            [subpost] => 
            [subpost_id] => 
            [attachment] => 
            [attachment_id] => 0
            [name] => 
            [pagename] => 
            [page_id] => 0
            [second] => 
            [minute] => 
            [hour] => 
            [day] => 0
            [monthnum] => 0
            [year] => 0
            [w] => 0
            [category_name] => 
            [tag] => 
            [cat] => 
            [tag_id] => 
            [author] => 
            [author_name] => 
            [feed] => 
            [tb] => 
            [paged] => 0
            [meta_key] => 
            [meta_value] => 
            [preview] => 
            [s] => 
            [sentence] => 
            [title] => 
            [fields] => 
            [menu_order] => 
            [embed] => 
            [category__in] => Array
                (
                )

            [category__not_in] => Array
                (
                )

            [category__and] => Array
                (
                )

            [post__in] => Array
                (
                )

            [post__not_in] => Array
                (
                )

            [post_name__in] => Array
                (
                )

            [tag__in] => Array
                (
                )

            [tag__not_in] => Array
                (
                )

            [tag__and] => Array
                (
                )

            [tag_slug__in] => Array
                (
                )

            [tag_slug__and] => Array
                (
                )

            [post_parent__in] => Array
                (
                )

            [post_parent__not_in] => Array
                (
                )

            [author__in] => Array
                (
                )

            [author__not_in] => Array
                (
                )

            [search_columns] => Array
                (
                )

            [ignore_sticky_posts] => 
            [suppress_filters] => 
            [cache_results] => 1
            [update_post_term_cache] => 1
            [update_menu_item_cache] => 
            [lazy_load_term_meta] => 1
            [update_post_meta_cache] => 1
            [nopaging] => 1
            [comments_per_page] => 50
            [no_found_rows] => 
            [order] => DESC
        )

    [tax_query] => WP_Tax_Query Object
        (
            [queries] => Array
                (
                )

            [relation] => AND
            [table_aliases:protected] => Array
                (
                )

            [queried_terms] => Array
                (
                )

            [primary_table] => wp_posts
            [primary_id_column] => ID
        )

    [meta_query] => WP_Meta_Query Object
        (
            [queries] => Array
                (
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "177"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "177"
                            [compare] => LIKE
                        )

                    [relation] => OR
                )

            [relation] => OR
            [meta_table] => wp_postmeta
            [meta_id_column] => post_id
            [primary_table] => wp_posts
            [primary_id_column] => ID
            [table_aliases:protected] => Array
                (
                    [0] => wp_postmeta
                )

            [clauses:protected] => Array
                (
                    [wp_postmeta] => Array
                        (
                            [key] => new_authors
                            [value] => "177"
                            [compare] => LIKE
                            [compare_key] => =
                            [alias] => wp_postmeta
                            [cast] => CHAR
                        )

                    [wp_postmeta-1] => Array
                        (
                            [key] => new_presenters
                            [value] => "177"
                            [compare] => LIKE
                            [compare_key] => =
                            [alias] => wp_postmeta
                            [cast] => CHAR
                        )

                )

            [has_or_relation:protected] => 1
        )

    [date_query] => 
    [request] => SELECT   wp_posts.ID
					 FROM wp_posts  INNER JOIN wp_postmeta ON ( wp_posts.ID = wp_postmeta.post_id )
					 WHERE 1=1  AND ( 
  ( wp_postmeta.meta_key = 'new_authors' AND wp_postmeta.meta_value LIKE '{0752c9c9a62bc96859cd02d0edf31e22197b37f897257ca530dabb2ce6cccc19}\"177\"{0752c9c9a62bc96859cd02d0edf31e22197b37f897257ca530dabb2ce6cccc19}' ) 
  OR 
  ( wp_postmeta.meta_key = 'new_presenters' AND wp_postmeta.meta_value LIKE '{0752c9c9a62bc96859cd02d0edf31e22197b37f897257ca530dabb2ce6cccc19}\"177\"{0752c9c9a62bc96859cd02d0edf31e22197b37f897257ca530dabb2ce6cccc19}' )
) AND wp_posts.post_type IN ('post', 'webinars') AND ((wp_posts.post_status = 'publish'))
					 GROUP BY wp_posts.ID
					 ORDER BY wp_posts.post_date DESC
					 
    [posts] => Array
        (
            [0] => WP_Post Object
                (
                    [ID] => 31564
                    [post_author] => 53
                    [post_date] => 2024-02-20 09:59:20
                    [post_date_gmt] => 2024-02-20 15:59:20
                    [post_content] => 




Watch Now

Overview

Incorporating Artificial Intelligence (AI) into your business or developing your own machine learning (ML) models can be exciting! Whether you are purchasing out-of-the-box AI solutions or developing your own Large Language Models (LLMs), ensuring a secure foundation from the start is paramount — and not for the faint of heart.  

Looking for guidance on how to safely adopt generative AI? Look no further. There’s no better guiding light than other security leaders that have already experienced the process — or are going through it as we speak.  

NetSPI Field CISO, Nabil Hannan, welcomed two AI security leaders for a discussion on what they’ve learned throughout their experiences implementing Generative AI in their companies. Chris Schneider, Senior Staff Security Engineer at Google, and Tim Schulz, Distinguished Engineer, AI Red Team at Verizon, shared their perspectives on cybersecurity considerations companies should address before integrating AI into their systems and proactive measures can organizations take to avoid some of the most common cybersecurity pitfalls teams face. 

Access the on-demand webinar to hear their discussion on:  

  • Cybersecurity questions to ask before starting your AI journey  
  • Common pitfalls and challenges you can avoid 
  • Stories from security leaders on the top lessons they’ve learned   
  • Security testing approaches for AI-based systems  
  • And more! 

Key Highlights 

03:27 - AI as a misnomer 
12:22 – What to consider before implementing AI 
17:51 – Aligning AI initiatives with cybersecurity goals 
10:41 - Perspectives on community guidance 
24:35 - Cybersecurity pitfalls with Generative AI 
34:51 - Testing AI-based systems vs traditional software 
41:50 - Security testing for AI-based systems 
47:58 – Lessons learned from implementing AI 

Artificial Intelligence can be a misnomer because it implies that there’s a form of sentience behind the technology. In most cases when talking about AI, we’re talking about technology that digests large amounts of data and gives an output quickly. Can you share your perspective on the technology and how it’s named?  

Tim: Tim explains that generative AI has influenced the discourse on the essence of artificial intelligence, sparking debates over terminology. The widespread familiarity with AI, thanks to its portrayal in Hollywood and elsewhere, has led to diverse interpretations. However, he notes the existing definition fails to accommodate the nuanced discussions necessitated by technological advancements. This discrepancy poses a significant challenge. While the term "AI" is easily recognizable to the general public, the field's rapid evolution demands a reassessment of foundational definitions. Expert opinions vary, which is why discussions like these are constructive because it’s better to have diverse perspectives rather than categorizing any particular viewpoint as unpopular. 

Chris: Chris makes a case for AI being a term more widely recognized by the public compared to machine learning. The historical marketing associated with AI makes it more familiar to people and increases its appeal. However, he cautions the influence of popular media may distort factual aspects and contribute to exaggerated claims, often made by celebrities. As a scientist, he advocates for a cautious approach, emphasizing the importance of basing discussions on demonstrated capabilities and evidence from past experiences. Differing opinions can be valid if they are not sensational, such as concerns about a robot uprising, which is divergent from the field's focus on probabilistic forecasting and observed behaviors. AI is a process involving memorization, repetition, and probabilistic synthesis rather than independent intelligence or foresight. 

What are some aspects to consider before organizations start their journey to leverage AI-based technologies? Are there common pitfalls that organizations run into? 

Tim: Tim believes it’s important to assess available resources for AI adoption. AI isn’t a simplistic, plug-and-play solution. Rather it has significant infrastructure and engineering efforts necessary for seamless integration. The complexity results in a vital need to dedicate resources and adopt a comprehensive approach. Moreover, AI literacy plays a crucial role in facilitating effective communication and decision-making.  

Tim cautions against the risk of being outmaneuvered in discussions by vendors and advocates for seeking partnerships or trusted advisors to bridge knowledge gaps. The industry needs to embrace continuous learning and adaptation in response to evolving regulations and the dynamic nature of AI technology. Outsourcing can be a viable option to streamline operations for those reluctant to commit to ongoing maintenance and operational efforts. 

Are there ways organizations can ensure their AI initiatives align with their cybersecurity goals and protocols? 

Chris: Speaking as a prospective employee at Google, but not officially on behalf of Google, Chris explains one of the ways he approaches this is to use the Android AppSec Knowledgebase within Android Studio. This tool provides developers with real-time alerts regarding common errors or security risks, often accompanied by quick fixes. It’s updated with ongoing efforts to expand its functionality to encompass machine learning implementations, aligning with Google's Secure AI Framework (SAIF). The framework offers guidelines and controls to address security concerns associated with ML technologies, although it may not cover all emerging issues, prompting ongoing research and development. Chris emphasizes the adaptability of these controls to suit different organizational needs and highlights their open-source nature, allowing individuals to apply custom logic. He mentions drawing inspiration from existing literature and industry feedback, aiming to contribute positively to the community, while acknowledging the learning curve and the complexity involved. 

Do you have any perspectives on the community guidance that’s being generated? Anything you’re hoping to see in the future?  

Tim: Tim notes a significant challenge in the AI domain is the gap between widespread knowledge and expert-driven understanding. Despite the rapid advancements in AI, Tim observes a lack of comprehensive knowledge across organizations due to the sheer volume of developments.  

Community efforts have had a positive impact on sharing knowledge so far, but challenges remain in discerning quality information amidst the abundance of resources. Major tech companies like Google, Meta, and Microsoft have contributed by releasing tools and addressing AI security concerns, facilitated by recent executive orders. However, the absence of a common toolset for testing models remains a challenge. Tim commends the efforts of large players in the industry to democratize expertise but acknowledges the ongoing barrier posed by the need for specialized knowledge. Broadening discussions beyond model deployment is important to address emerging complexities in AI. 

What have you seen as some of the most common cybersecurity pitfalls that organizations have encountered when they implement AI technologies? Do you have any recommendations to avoid those? 

Tim: Tim says it’s inevitable that Generative AI will permeate organizations in various capacities, requiring heightened security measures. AI literacy is essential in understanding and safeguarding AI systems, which differs significantly from conventional web application protection.  

Notably, crafting incident response plans for AI incidents poses unique challenges, given the distinct log sources and visibility gaps inherent in AI systems. While efforts to detect issues like data poisoning are underway, they remain primarily in the research phase. Explainable AI and AI transparency is incredibly important in enhancing visibility for security teams.  

Distinguishing between regular incident response and AI incident response processes is crucial, potentially involving different teams and protocols. Dynamics are shifting within data science teams, now grappling with newfound attention and security concerns due to Generative AI. Bridging the gap between data science and cybersecurity teams requires fostering collaboration and adapting to evolving processes. Legal considerations also come into play, as compliance requirements for AI systems necessitate legal counsel involvement in decision-making processes.  

These ongoing discussions reflect the dynamic nature of AI security and underscore the need for continual adaptation and collaboration among stakeholders. The field is developing rapidly with new advancements emerging often on a daily, weekly, or even hourly basis. Drawing from personal experience, Tim emphasizes the unprecedented speed at which research transitions into practical applications and proof-of-concepts (POCs), ultimately integrating into products. This remarkable acceleration from research to productization represents an unparalleled advancement in technology maturity timelines. 

Chris: The concept of "adopt and adapt" is helpful here, noting both traditional and emerging issues with code execution. Machine learning introduces unintentional variants in input and output, posing challenges for software developers. A modified approach for machine learning has multiple stages, including pre-training and post-deployment sets. While traditional infrastructure controls may suffice, addressing non-infrastructure controls, particularly on devices, proves more challenging due to physical possession advantages. Hybrid models, such as those seen in the gaming industry, offer a viable approach, particularly for mitigating risks like piracy. He highlights the need for robust assurances in machine learning usage, especially concerning compliance and ethical considerations. 

Traditional software testing paradigms may not apply to AI-based systems that are non-deterministic. What makes testing AI-based systems unique compared to traditional software?  

Chris: Considering security aspects, the focus is on achieving security parity with current controls. However, addressing emerging threats or new capabilities in machine learning poses challenges. If existing controls prove inadequate for these scenarios, alternative approaches must be explored. For instance, the synthesis of identity presents significant concerns, as advancements in technology allow for sophisticated audio synthesis with minimal sample data requirements. This underscores the need for proactive measures to address evolving security risks. 

In security, the focus is on achieving security parity with current controls while addressing emerging threats or new capabilities in machine learning. For instance, the synthesis of identity presents significant concerns, as advancements in technology enable sophisticated audio and video synthesis, allowing for impersonation and potentially fraudulent activities. Preventing such misuse is a pressing concern, with efforts aimed at developing semantic and provable solutions to combat these challenges.  

Additionally, there's a distinction between stochastic and non-stochastic software, with an increasing emphasis on the collection of vast amounts of data without strict domain and range boundaries. This shift challenges traditional security principles, particularly the importance of authenticating data before processing it, as emphasized by Moxie Marlinspike's "Doom principle."  

Despite the widespread acceptance of indiscriminate data ingestion, there's growing recognition of the risks associated with it, such as prompt injection and astroturfing. Testing the security of systems against inconsistent behaviors and untrusted data sources has always been challenging, with approaches like utility functions proposed to address these complexities. Finding the right balance between control and innovation remains a central dilemma, with both excessive control and insufficient oversight posing risks to the integrity and reliability of systems. 

From a Red Teaming perspective, what measures should organizations take to ensure comprehensive security testing for AI-based systems? What tips or tricks have been effective in your experience that you wish you had known earlier? 

Tim: Tim explains that one of the aspects organizations need to consider is the testing phase, especially during deployment of AI-based systems like web applications integrated with language models. Understanding the intended behavior is crucial, and simulating user interactions helps in documenting various use cases accurately. Cost is another significant aspect to evaluate, as API usage can incur charges based on request/response rates. Red teaming or penetration testing should explore context length expansion tactics to avoid unforeseen financial burdens, especially when manipulating parameters to change response lengths.  

Efficient resource utilization is paramount, considering that most organizations won't deploy or train massive models due to cost constraints. Therefore, managing expenses and implementing guardrails for API usage becomes imperative. Additionally, safeguarding brand reputation is crucial, particularly for public-facing platforms, where Generative AI content could potentially lead to negative publicity if misused. Thus, a comprehensive approach to security and Red Teaming in AI systems involves addressing not only technical controls but also considering broader implications and partnering with responsible AI teams to mitigate risks effectively. 

If you could go back in time and share one lesson with your younger self that would have helped on your AI journey, what would it be? 

Chris: Synthesizing content can offer benefits, yet it entails inherent trade-offs. The ability to produce unique interactions correlates with the tolerance for risk that the business is willing to accept. This aspect is quantified by a term known as "temperature" in business jargon. Conversely, if the generated content pertains to sensitive information like payment details, it can present challenges that need careful consideration before implementation. Miguel Rodriguez's suggestion regarding pre- and post-training, as well as pre- and post-deployment phases, serves as an excellent starting point. Additionally, augmenting these phases with considerations for networking, hardware, operating systems, and application context helps fortify the threat model review process. 

Tim: Similar to what Chris mentioned, sending specific resources on honing in on lessons about neural networks could be beneficial. Overall, the key is to continue using these systems. Besides understanding the theory, interacting with the systems and trying different prompts is crucial. Experimenting with advertised hacks and cheats found online can provide insights into their effectiveness. Diversity of thought is important as it offers various approaches to exploring these systems. Therefore, focusing on experimentation and continual learning is essential for gaining knowledge in this field. 

Hear the full discussion between Nabil, Chris, and Tim by requesting the on-demand webinar using the form above or continue your AI security learning by accessing our eBook, “The CISO’s Guide to Securing AI/ML Models.” 

[wonderplugin_video iframe="https://youtu.be/LC9E44mDJEY" lightbox=0 lightboxsize=1 lightboxwidth=1200 lightboxheight=674.999999999999916 autoopen=0 autoopendelay=0 autoclose=0 lightboxtitle="" lightboxgroup="" lightboxshownavigation=0 showimage="" lightboxoptions="" videowidth=1200 videoheight=674.999999999999916 keepaspectratio=1 autoplay=0 loop=0 videocss="position:relative;display:block;background-color:#000;overflow:hidden;max-width:100%;margin:0 auto;" playbutton="https://www.netspi.com/wp-content/plugins/wonderplugin-video-embed/engine/playvideo-64-64-0.png"]

[post_title] => Hindsight’s 20/20: What Security Leaders Wish They Knew Before Implementing Generative AI  [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => what-to-know-before-implementing-generative-ai [to_ping] => [pinged] => [post_modified] => 2024-03-27 13:40:01 [post_modified_gmt] => 2024-03-27 18:40:01 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?post_type=webinars&p=31564 [menu_order] => 3 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) ) [post_count] => 1 [current_post] => -1 [before_loop] => 1 [in_the_loop] => [post] => WP_Post Object ( [ID] => 31564 [post_author] => 53 [post_date] => 2024-02-20 09:59:20 [post_date_gmt] => 2024-02-20 15:59:20 [post_content] =>
Watch Now

Overview

Incorporating Artificial Intelligence (AI) into your business or developing your own machine learning (ML) models can be exciting! Whether you are purchasing out-of-the-box AI solutions or developing your own Large Language Models (LLMs), ensuring a secure foundation from the start is paramount — and not for the faint of heart.  

Looking for guidance on how to safely adopt generative AI? Look no further. There’s no better guiding light than other security leaders that have already experienced the process — or are going through it as we speak.  

NetSPI Field CISO, Nabil Hannan, welcomed two AI security leaders for a discussion on what they’ve learned throughout their experiences implementing Generative AI in their companies. Chris Schneider, Senior Staff Security Engineer at Google, and Tim Schulz, Distinguished Engineer, AI Red Team at Verizon, shared their perspectives on cybersecurity considerations companies should address before integrating AI into their systems and proactive measures can organizations take to avoid some of the most common cybersecurity pitfalls teams face. 

Access the on-demand webinar to hear their discussion on:  

  • Cybersecurity questions to ask before starting your AI journey  
  • Common pitfalls and challenges you can avoid 
  • Stories from security leaders on the top lessons they’ve learned   
  • Security testing approaches for AI-based systems  
  • And more! 

Key Highlights 

03:27 - AI as a misnomer 
12:22 – What to consider before implementing AI 
17:51 – Aligning AI initiatives with cybersecurity goals 
10:41 - Perspectives on community guidance 
24:35 - Cybersecurity pitfalls with Generative AI 
34:51 - Testing AI-based systems vs traditional software 
41:50 - Security testing for AI-based systems 
47:58 – Lessons learned from implementing AI 

Artificial Intelligence can be a misnomer because it implies that there’s a form of sentience behind the technology. In most cases when talking about AI, we’re talking about technology that digests large amounts of data and gives an output quickly. Can you share your perspective on the technology and how it’s named?  

Tim: Tim explains that generative AI has influenced the discourse on the essence of artificial intelligence, sparking debates over terminology. The widespread familiarity with AI, thanks to its portrayal in Hollywood and elsewhere, has led to diverse interpretations. However, he notes the existing definition fails to accommodate the nuanced discussions necessitated by technological advancements. This discrepancy poses a significant challenge. While the term "AI" is easily recognizable to the general public, the field's rapid evolution demands a reassessment of foundational definitions. Expert opinions vary, which is why discussions like these are constructive because it’s better to have diverse perspectives rather than categorizing any particular viewpoint as unpopular. 

Chris: Chris makes a case for AI being a term more widely recognized by the public compared to machine learning. The historical marketing associated with AI makes it more familiar to people and increases its appeal. However, he cautions the influence of popular media may distort factual aspects and contribute to exaggerated claims, often made by celebrities. As a scientist, he advocates for a cautious approach, emphasizing the importance of basing discussions on demonstrated capabilities and evidence from past experiences. Differing opinions can be valid if they are not sensational, such as concerns about a robot uprising, which is divergent from the field's focus on probabilistic forecasting and observed behaviors. AI is a process involving memorization, repetition, and probabilistic synthesis rather than independent intelligence or foresight. 

What are some aspects to consider before organizations start their journey to leverage AI-based technologies? Are there common pitfalls that organizations run into? 

Tim: Tim believes it’s important to assess available resources for AI adoption. AI isn’t a simplistic, plug-and-play solution. Rather it has significant infrastructure and engineering efforts necessary for seamless integration. The complexity results in a vital need to dedicate resources and adopt a comprehensive approach. Moreover, AI literacy plays a crucial role in facilitating effective communication and decision-making.  

Tim cautions against the risk of being outmaneuvered in discussions by vendors and advocates for seeking partnerships or trusted advisors to bridge knowledge gaps. The industry needs to embrace continuous learning and adaptation in response to evolving regulations and the dynamic nature of AI technology. Outsourcing can be a viable option to streamline operations for those reluctant to commit to ongoing maintenance and operational efforts. 

Are there ways organizations can ensure their AI initiatives align with their cybersecurity goals and protocols? 

Chris: Speaking as a prospective employee at Google, but not officially on behalf of Google, Chris explains one of the ways he approaches this is to use the Android AppSec Knowledgebase within Android Studio. This tool provides developers with real-time alerts regarding common errors or security risks, often accompanied by quick fixes. It’s updated with ongoing efforts to expand its functionality to encompass machine learning implementations, aligning with Google's Secure AI Framework (SAIF). The framework offers guidelines and controls to address security concerns associated with ML technologies, although it may not cover all emerging issues, prompting ongoing research and development. Chris emphasizes the adaptability of these controls to suit different organizational needs and highlights their open-source nature, allowing individuals to apply custom logic. He mentions drawing inspiration from existing literature and industry feedback, aiming to contribute positively to the community, while acknowledging the learning curve and the complexity involved. 

Do you have any perspectives on the community guidance that’s being generated? Anything you’re hoping to see in the future?  

Tim: Tim notes a significant challenge in the AI domain is the gap between widespread knowledge and expert-driven understanding. Despite the rapid advancements in AI, Tim observes a lack of comprehensive knowledge across organizations due to the sheer volume of developments.  

Community efforts have had a positive impact on sharing knowledge so far, but challenges remain in discerning quality information amidst the abundance of resources. Major tech companies like Google, Meta, and Microsoft have contributed by releasing tools and addressing AI security concerns, facilitated by recent executive orders. However, the absence of a common toolset for testing models remains a challenge. Tim commends the efforts of large players in the industry to democratize expertise but acknowledges the ongoing barrier posed by the need for specialized knowledge. Broadening discussions beyond model deployment is important to address emerging complexities in AI. 

What have you seen as some of the most common cybersecurity pitfalls that organizations have encountered when they implement AI technologies? Do you have any recommendations to avoid those? 

Tim: Tim says it’s inevitable that Generative AI will permeate organizations in various capacities, requiring heightened security measures. AI literacy is essential in understanding and safeguarding AI systems, which differs significantly from conventional web application protection.  

Notably, crafting incident response plans for AI incidents poses unique challenges, given the distinct log sources and visibility gaps inherent in AI systems. While efforts to detect issues like data poisoning are underway, they remain primarily in the research phase. Explainable AI and AI transparency is incredibly important in enhancing visibility for security teams.  

Distinguishing between regular incident response and AI incident response processes is crucial, potentially involving different teams and protocols. Dynamics are shifting within data science teams, now grappling with newfound attention and security concerns due to Generative AI. Bridging the gap between data science and cybersecurity teams requires fostering collaboration and adapting to evolving processes. Legal considerations also come into play, as compliance requirements for AI systems necessitate legal counsel involvement in decision-making processes.  

These ongoing discussions reflect the dynamic nature of AI security and underscore the need for continual adaptation and collaboration among stakeholders. The field is developing rapidly with new advancements emerging often on a daily, weekly, or even hourly basis. Drawing from personal experience, Tim emphasizes the unprecedented speed at which research transitions into practical applications and proof-of-concepts (POCs), ultimately integrating into products. This remarkable acceleration from research to productization represents an unparalleled advancement in technology maturity timelines. 

Chris: The concept of "adopt and adapt" is helpful here, noting both traditional and emerging issues with code execution. Machine learning introduces unintentional variants in input and output, posing challenges for software developers. A modified approach for machine learning has multiple stages, including pre-training and post-deployment sets. While traditional infrastructure controls may suffice, addressing non-infrastructure controls, particularly on devices, proves more challenging due to physical possession advantages. Hybrid models, such as those seen in the gaming industry, offer a viable approach, particularly for mitigating risks like piracy. He highlights the need for robust assurances in machine learning usage, especially concerning compliance and ethical considerations. 

Traditional software testing paradigms may not apply to AI-based systems that are non-deterministic. What makes testing AI-based systems unique compared to traditional software?  

Chris: Considering security aspects, the focus is on achieving security parity with current controls. However, addressing emerging threats or new capabilities in machine learning poses challenges. If existing controls prove inadequate for these scenarios, alternative approaches must be explored. For instance, the synthesis of identity presents significant concerns, as advancements in technology allow for sophisticated audio synthesis with minimal sample data requirements. This underscores the need for proactive measures to address evolving security risks. 

In security, the focus is on achieving security parity with current controls while addressing emerging threats or new capabilities in machine learning. For instance, the synthesis of identity presents significant concerns, as advancements in technology enable sophisticated audio and video synthesis, allowing for impersonation and potentially fraudulent activities. Preventing such misuse is a pressing concern, with efforts aimed at developing semantic and provable solutions to combat these challenges.  

Additionally, there's a distinction between stochastic and non-stochastic software, with an increasing emphasis on the collection of vast amounts of data without strict domain and range boundaries. This shift challenges traditional security principles, particularly the importance of authenticating data before processing it, as emphasized by Moxie Marlinspike's "Doom principle."  

Despite the widespread acceptance of indiscriminate data ingestion, there's growing recognition of the risks associated with it, such as prompt injection and astroturfing. Testing the security of systems against inconsistent behaviors and untrusted data sources has always been challenging, with approaches like utility functions proposed to address these complexities. Finding the right balance between control and innovation remains a central dilemma, with both excessive control and insufficient oversight posing risks to the integrity and reliability of systems. 

From a Red Teaming perspective, what measures should organizations take to ensure comprehensive security testing for AI-based systems? What tips or tricks have been effective in your experience that you wish you had known earlier? 

Tim: Tim explains that one of the aspects organizations need to consider is the testing phase, especially during deployment of AI-based systems like web applications integrated with language models. Understanding the intended behavior is crucial, and simulating user interactions helps in documenting various use cases accurately. Cost is another significant aspect to evaluate, as API usage can incur charges based on request/response rates. Red teaming or penetration testing should explore context length expansion tactics to avoid unforeseen financial burdens, especially when manipulating parameters to change response lengths.  

Efficient resource utilization is paramount, considering that most organizations won't deploy or train massive models due to cost constraints. Therefore, managing expenses and implementing guardrails for API usage becomes imperative. Additionally, safeguarding brand reputation is crucial, particularly for public-facing platforms, where Generative AI content could potentially lead to negative publicity if misused. Thus, a comprehensive approach to security and Red Teaming in AI systems involves addressing not only technical controls but also considering broader implications and partnering with responsible AI teams to mitigate risks effectively. 

If you could go back in time and share one lesson with your younger self that would have helped on your AI journey, what would it be? 

Chris: Synthesizing content can offer benefits, yet it entails inherent trade-offs. The ability to produce unique interactions correlates with the tolerance for risk that the business is willing to accept. This aspect is quantified by a term known as "temperature" in business jargon. Conversely, if the generated content pertains to sensitive information like payment details, it can present challenges that need careful consideration before implementation. Miguel Rodriguez's suggestion regarding pre- and post-training, as well as pre- and post-deployment phases, serves as an excellent starting point. Additionally, augmenting these phases with considerations for networking, hardware, operating systems, and application context helps fortify the threat model review process. 

Tim: Similar to what Chris mentioned, sending specific resources on honing in on lessons about neural networks could be beneficial. Overall, the key is to continue using these systems. Besides understanding the theory, interacting with the systems and trying different prompts is crucial. Experimenting with advertised hacks and cheats found online can provide insights into their effectiveness. Diversity of thought is important as it offers various approaches to exploring these systems. Therefore, focusing on experimentation and continual learning is essential for gaining knowledge in this field. 

Hear the full discussion between Nabil, Chris, and Tim by requesting the on-demand webinar using the form above or continue your AI security learning by accessing our eBook, “The CISO’s Guide to Securing AI/ML Models.” 

[wonderplugin_video iframe="https://youtu.be/LC9E44mDJEY" lightbox=0 lightboxsize=1 lightboxwidth=1200 lightboxheight=674.999999999999916 autoopen=0 autoopendelay=0 autoclose=0 lightboxtitle="" lightboxgroup="" lightboxshownavigation=0 showimage="" lightboxoptions="" videowidth=1200 videoheight=674.999999999999916 keepaspectratio=1 autoplay=0 loop=0 videocss="position:relative;display:block;background-color:#000;overflow:hidden;max-width:100%;margin:0 auto;" playbutton="https://www.netspi.com/wp-content/plugins/wonderplugin-video-embed/engine/playvideo-64-64-0.png"]

[post_title] => Hindsight’s 20/20: What Security Leaders Wish They Knew Before Implementing Generative AI  [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => what-to-know-before-implementing-generative-ai [to_ping] => [pinged] => [post_modified] => 2024-03-27 13:40:01 [post_modified_gmt] => 2024-03-27 18:40:01 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?post_type=webinars&p=31564 [menu_order] => 3 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) [comment_count] => 0 [current_comment] => -1 [found_posts] => 1 [max_num_pages] => 0 [max_num_comment_pages] => 0 [is_single] => [is_preview] => [is_page] => [is_archive] => [is_date] => [is_year] => [is_month] => [is_day] => [is_time] => [is_author] => [is_category] => [is_tag] => [is_tax] => [is_search] => [is_feed] => [is_comment_feed] => [is_trackback] => [is_home] => 1 [is_privacy_policy] => [is_404] => [is_embed] => [is_paged] => [is_admin] => [is_attachment] => [is_singular] => [is_robots] => [is_favicon] => [is_posts_page] => [is_post_type_archive] => [query_vars_hash:WP_Query:private] => f777e5b466ddd72db83774fbc919e430 [query_vars_changed:WP_Query:private] => [thumbnails_cached] => [allow_query_attachment_by_filename:protected] => [stopwords:WP_Query:private] => [compat_fields:WP_Query:private] => Array ( [0] => query_vars_hash [1] => query_vars_changed ) [compat_methods:WP_Query:private] => Array ( [0] => init_query_flags [1] => parse_tax_query ) )

Discover how the NetSPI BAS solution helps organizations validate the efficacy of existing security controls and understand their Security Posture and Readiness.

X