#include <aws/ec2/EC2_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/ec2/model/InferenceDeviceInfo.h>

    // Setter: perfect-forwards a container of InferenceDeviceInfo and marks the field as set.
    template<typename AcceleratorsT = Aws::Vector<InferenceDeviceInfo>>
    void SetAccelerators(AcceleratorsT&& value) { m_acceleratorsHasBeenSet = true; m_accelerators = std::forward<AcceleratorsT>(value); }

    // Fluent variant: delegates to SetAccelerators and returns *this so calls can be chained.
    template<typename AcceleratorsT = Aws::Vector<InferenceDeviceInfo>>
    InferenceAcceleratorInfo& WithAccelerators(AcceleratorsT&& value) { SetAccelerators(std::forward<AcceleratorsT>(value)); return *this; }

    // Appends a single InferenceDeviceInfo entry to the list.
    template<typename AcceleratorsT = InferenceDeviceInfo>
    InferenceAcceleratorInfo& AddAccelerators(AcceleratorsT&& value) { m_acceleratorsHasBeenSet = true; m_accelerators.emplace_back(std::forward<AcceleratorsT>(value)); return *this; }

    // Private data members backing the accessors above.
    Aws::Vector<InferenceDeviceInfo> m_accelerators;
    bool m_acceleratorsHasBeenSet = false;

    int m_totalInferenceMemoryInMiB{0};
    bool m_totalInferenceMemoryInMiBHasBeenSet = false;
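
A minimal usage sketch of the accessors above, under stated assumptions: it relies only on the members shown in this excerpt and in the member list below, leaves the InferenceDeviceInfo entry default-constructed (its own fields are outside this excerpt), uses a hypothetical memory value of 8192 MiB, and omits Aws::InitAPI/ShutdownAPI setup.

#include <aws/ec2/model/InferenceAcceleratorInfo.h>
#include <aws/ec2/model/InferenceDeviceInfo.h>
#include <iostream>
#include <utility>

int main()
{
  using Aws::EC2::Model::InferenceAcceleratorInfo;
  using Aws::EC2::Model::InferenceDeviceInfo;

  // One accelerator entry; its own fields are not covered by this excerpt,
  // so it is left default-constructed purely for illustration.
  InferenceDeviceInfo device;

  // Fluent construction: each With*/Add* call returns *this, so calls chain.
  InferenceAcceleratorInfo info = InferenceAcceleratorInfo()
      .WithTotalInferenceMemoryInMiB(8192)        // hypothetical value, in MiB
      .AddAccelerators(std::move(device));

  // The *HasBeenSet flags distinguish "never assigned" from a default value.
  if (info.TotalInferenceMemoryInMiBHasBeenSet())
    std::cout << "Total inference memory (MiB): "
              << info.GetTotalInferenceMemoryInMiB() << "\n";
  std::cout << "Accelerator entries: " << info.GetAccelerators().size() << "\n";
  return 0;
}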
Member declarations of Aws::EC2::Model::InferenceAcceleratorInfo (as referenced in the header excerpt):

    AWS_EC2_API InferenceAcceleratorInfo() = default;
    AWS_EC2_API InferenceAcceleratorInfo(const Aws::Utils::Xml::XmlNode& xmlNode);
    AWS_EC2_API InferenceAcceleratorInfo& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
    AWS_EC2_API void OutputToStream(Aws::OStream& ostream, const char* location, unsigned index, const char* locationValue) const;
    AWS_EC2_API void OutputToStream(Aws::OStream& oStream, const char* location) const;

    const Aws::Vector<InferenceDeviceInfo>& GetAccelerators() const;
    bool AcceleratorsHasBeenSet() const;
    void SetAccelerators(AcceleratorsT&& value);
    InferenceAcceleratorInfo& WithAccelerators(AcceleratorsT&& value);
    InferenceAcceleratorInfo& AddAccelerators(AcceleratorsT&& value);

    int GetTotalInferenceMemoryInMiB() const;
    bool TotalInferenceMemoryInMiBHasBeenSet() const;
    void SetTotalInferenceMemoryInMiB(int value);
    InferenceAcceleratorInfo& WithTotalInferenceMemoryInMiB(int value);

Referenced type aliases:

    Aws::Vector<T> = std::vector<T, Aws::Allocator<T>>
    Aws::OStream   = std::basic_ostream<char, std::char_traits<char>>
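
A second sketch, assuming only the OutputToStream(Aws::OStream&, const char*) overload and the aliases listed above: it serializes a populated object into a standard string stream. The "Example." location prefix is arbitrary here; in practice this overload is invoked by the SDK's generated request-serialization code, and the exact output format is not specified by this excerpt.

#include <aws/ec2/model/InferenceAcceleratorInfo.h>
#include <iostream>
#include <sstream>

int main()
{
  Aws::EC2::Model::InferenceAcceleratorInfo info;
  info.SetTotalInferenceMemoryInMiB(4096);   // hypothetical value, in MiB

  // Aws::OStream is plain std::ostream (see the alias above), so any
  // standard output stream can receive the serialized fields.
  std::ostringstream oss;
  info.OutputToStream(oss, "Example.");
  std::cout << oss.str() << std::endl;
  return 0;
}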